From 3c95e7ac95e133bce7ef8bf6f609dda2e8f331bf Mon Sep 17 00:00:00 2001
From: Luka Govedič
Date: Fri, 18 Apr 2025 08:11:30 -0400
Subject: [PATCH 1/5] Add device to imports (#82)

---
 depyf/explain/patched_lazy_format_graph_code.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/depyf/explain/patched_lazy_format_graph_code.py b/depyf/explain/patched_lazy_format_graph_code.py
index 28217d92..a0c73189 100644
--- a/depyf/explain/patched_lazy_format_graph_code.py
+++ b/depyf/explain/patched_lazy_format_graph_code.py
@@ -27,7 +27,7 @@ def patched_lazy_format_graph_code(name, gm, maybe_id=None, **kwargs):
         use_gm = True

     # use `print_readable` because it can include submodules
-    src = "from __future__ import annotations\nimport torch\n" + \
+    src = "from __future__ import annotations\nimport torch\nfrom torch import device\n" + \
         gm.print_readable(print_output=False)
     src = src.replace("", "GraphModule")
     try:

From 461af54b38e07df88c33f5da81970bfc82eaa325 Mon Sep 17 00:00:00 2001
From: nopperl <54780682+nopperl@users.noreply.github.com>
Date: Fri, 18 Apr 2025 21:15:58 +0900
Subject: [PATCH 2/5] Add missing default argument values in `patched_load_by_key_path` (#81)

---
 depyf/explain/patched_load_by_key_path.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/depyf/explain/patched_load_by_key_path.py b/depyf/explain/patched_load_by_key_path.py
index 14fdaaf4..664f08ab 100644
--- a/depyf/explain/patched_load_by_key_path.py
+++ b/depyf/explain/patched_load_by_key_path.py
@@ -1,8 +1,8 @@
 def patched_load_by_key_path(
     key: str,
     path: str,
-    linemap,
-    attrs,
+    linemap=None,
+    attrs=None,
 ):
     from depyf.explain.global_variables import data
     from depyf.explain.utils import write_code_to_file_template, get_current_compiled_fn_name

From 5cde94f8fe9446c56ccb19df71dc9f887d4f948a Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 20 Apr 2025 14:58:07 +0800
Subject: [PATCH 3/5] update test files (#83)

Signed-off-by: youkaichao
---
 .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_11.Backward_graph.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- .../__compiled_fn_11.Joint_graph.0.py | 36 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 36 +- .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__compiled_fn_7.Backward_graph.0.py | 38 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- .../__compiled_fn_7.Joint_graph.0.py | 56 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 56 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py
| 478 ++++++------------ .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 28 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 22 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 16 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 28 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py | 478 ++++++------------ .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 16 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py | 478 ++++++------------ 
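A note on the first patch above: depyf rebuilds each graph's source from `gm.print_readable(...)`, and that text can contain bare `device(type=..., index=...)` literals, so the dump only executes standalone if `device` is imported in its header. Below is a minimal sketch of the idea, assuming a toy traced module; the module, names, and source label are illustrative, not part of the patch.

    # Sketch (hypothetical toy module): build a standalone dump the way the
    # patched helper does, then check that it can be executed on its own.
    import torch
    import torch.fx

    class Toy(torch.nn.Module):
        def forward(self, x):
            return x + 1

    gm = torch.fx.symbolic_trace(Toy())

    # Same header the patch constructs; `from torch import device` keeps any
    # device(type='cpu')-style literal in the printed graph resolvable.
    src = (
        "from __future__ import annotations\n"
        "import torch\n"
        "from torch import device\n"
        + gm.print_readable(print_output=False)
    )

    namespace = {}
    exec(compile(src, "<graph_dump>", "exec"), namespace)  # no NameError on `device`

The second patch is in the same defensive spirit: defaulting `linemap` and `attrs` to `None` keeps `patched_load_by_key_path` usable when a caller passes only `key` and `path`.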
.../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py | 478 ++++++------------ .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- .../__compiled_fn_1.kernel_0.py | 80 +-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_11.AFTER_POST_GRAD.0.py | 22 +- .../__compiled_fn_11.AFTER_POST_GRAD.1.py | 34 +- .../__compiled_fn_11.BEFORE_PRE_GRAD.0.py | 15 + .../__compiled_fn_11.Backward_graph.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- .../__compiled_fn_11.Joint_graph.0.py | 36 +- .../__compiled_fn_11.kernel_0.py | 57 ++- .../__compiled_fn_11.kernel_1.py | 75 +-- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 36 +- .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__compiled_fn_7.AFTER_POST_GRAD.0.py | 28 +- .../__compiled_fn_7.AFTER_POST_GRAD.1.py | 38 +- .../__compiled_fn_7.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_7.Backward_graph.0.py | 38 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- .../__compiled_fn_7.Joint_graph.0.py | 56 +- .../__compiled_fn_7.kernel_0.py | 92 ++-- .../__compiled_fn_7.kernel_1.py | 115 +++-- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 56 +- 
...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py | 478 ++++++------------ .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 15 - .../__compiled_fn_1.kernel_0.py | 86 ++-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 15 - .../__compiled_fn_11.AFTER_POST_GRAD.0.py | 22 +- .../__compiled_fn_11.BEFORE_PRE_GRAD.0.py | 15 + .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- .../__compiled_fn_11.kernel_0.py | 55 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 22 +- .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 9 - .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 9 - .../__compiled_fn_7.AFTER_POST_GRAD.0.py | 28 +- .../__compiled_fn_7.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- .../__compiled_fn_7.kernel_0.py | 94 ++-- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 28 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 11 +- .../__transformed_code_1_for_toy_function.py | 16 +- .../full_code_for_toy_function_0.py | 478 ++++++------------ .../full_code_for_toy_function_1.py | 478 ++++++------------ .../full_code_for_toy_function_2.py | 478 ++++++------------ .../__compiled_fn_1.AFTER_POST_GRAD.0.py | 28 +- .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- .../__compiled_fn_1.kernel_0.py | 80 +-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_5.AFTER_POST_GRAD.0.py | 16 +- .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.AFTER_POST_GRAD.0.py | 28 +- .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.kernel_0.py | 86 ++-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 28 +- 
.../__compiled_fn_5.AFTER_POST_GRAD.0.py | 16 +- .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 16 +- ...orch_dynamo_resume_in_toy_function_at_5.py | 6 +- .../__transformed_code_0_for_toy_function.py | 8 +- .../full_code_for_toy_function_0.py | 154 ++---- .../full_code_for_toy_function_1.py | 154 ++---- .../full_code_for_toy_function_2.py | 154 ++---- .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_11.Backward_graph.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- .../__compiled_fn_11.Joint_graph.0.py | 36 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 36 +- .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__compiled_fn_7.Backward_graph.0.py | 38 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- .../__compiled_fn_7.Joint_graph.0.py | 56 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 56 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- .../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 28 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 22 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 22 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 16 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 28 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- .../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- 
...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 16 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- .../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_11.Captured_Graph.0.py | 28 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__compiled_fn_7.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- .../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- .../__transformed_code_0_for_forward.py | 8 +- 
...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- .../__compiled_fn_1.kernel_0.py | 80 +-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_11.BEFORE_PRE_GRAD.0.py | 15 + .../__compiled_fn_11.Backward_graph.0.py | 18 - .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 12 - .../__compiled_fn_11.Joint_graph.0.py | 20 - .../__compiled_fn_11.kernel_0.py | 57 ++- .../__compiled_fn_11.kernel_1.py | 75 +-- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 20 - .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__compiled_fn_7.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_7.Backward_graph.0.py | 20 - .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 15 - .../__compiled_fn_7.Joint_graph.0.py | 30 -- .../__compiled_fn_7.kernel_0.py | 92 ++-- .../__compiled_fn_7.kernel_1.py | 115 +++-- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 30 -- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- .../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 15 - .../__compiled_fn_1.kernel_0.py | 86 ++-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 15 - .../__compiled_fn_11.BEFORE_PRE_GRAD.0.py | 15 + .../__compiled_fn_11.Captured_Graph.0.py | 28 +- .../__compiled_fn_11.Forward_graph.0.py | 12 - .../__compiled_fn_11.kernel_0.py | 55 +- ...rred_runtime_asserts___compiled_fn_11.0.py | 28 +- ...mpiled_fn_11.tensorify_python_scalars.0.py | 12 - .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 9 - .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 9 - .../__compiled_fn_7.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_7.Captured_Graph.0.py | 34 +- .../__compiled_fn_7.Forward_graph.0.py | 15 - .../__compiled_fn_7.kernel_0.py | 94 ++-- ...erred_runtime_asserts___compiled_fn_7.0.py | 34 +- ...ompiled_fn_7.tensorify_python_scalars.0.py | 15 - .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../__transformed_code_1_for_forward.py | 16 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 11 +- 
.../full_code_for_forward_0.py | 478 ++++++------------ .../full_code_for_forward_1.py | 478 ++++++------------ .../full_code_for_forward_2.py | 478 ++++++------------ .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Backward_graph.0.py | 38 +- .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 28 +- .../__compiled_fn_1.Joint_graph.0.py | 56 +- .../__compiled_fn_1.kernel_0.py | 80 +-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 56 +- .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Backward_graph.0.py | 18 +- .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 16 +- .../__compiled_fn_5.Joint_graph.0.py | 26 +- .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 26 +- .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.BEFORE_PRE_GRAD.0.py | 18 + .../__compiled_fn_1.Captured_Graph.0.py | 34 +- .../__compiled_fn_1.Forward_graph.0.py | 15 - .../__compiled_fn_1.kernel_0.py | 86 ++-- ...erred_runtime_asserts___compiled_fn_1.0.py | 34 +- ...ompiled_fn_1.tensorify_python_scalars.0.py | 15 - .../__compiled_fn_5.BEFORE_PRE_GRAD.0.py | 12 + .../__compiled_fn_5.Captured_Graph.0.py | 22 +- .../__compiled_fn_5.Forward_graph.0.py | 9 - .../__compiled_fn_5.kernel_0.py | 33 +- ...erred_runtime_asserts___compiled_fn_5.0.py | 22 +- ...ompiled_fn_5.tensorify_python_scalars.0.py | 9 - .../__transformed_code_0_for_forward.py | 8 +- ...or_torch_dynamo_resume_in_forward_at_15.py | 6 +- .../full_code_for_forward_0.py | 154 ++---- .../full_code_for_forward_1.py | 154 ++---- .../full_code_for_forward_2.py | 154 ++---- .../__compiled_fn_1.Captured_Graph.0.py | 20 +- ...erred_runtime_asserts___compiled_fn_1.0.py | 20 +- .../__transformed_code_0_for_f.py | 5 +- .../multiprocessing/full_code_for_f_0.py | 72 +-- .../multiprocessing/full_code_for_f_1.py | 72 +-- .../multiprocessing/full_code_for_f_2.py | 72 +-- 500 files changed, 13054 insertions(+), 21464 deletions(-) create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py delete mode 100644 
tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py delete mode 100644 
tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py create mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py delete mode 100644 tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 75ed5eb7..78571ac9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = 
torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 9d97d16d..c3aac4e0 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = 
torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 71225da5..0e6b7e72 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in 
toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 71225da5..0e6b7e72 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 
0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py index 496fa11b..8474b561 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return (None, mul_6, mul_5) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]", tangents_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in 
torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py index 9595a545..9ccce268 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None - return (mul_2, primals_2, primals_3, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]"): + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py index 343df7ad..56544217 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py index 343df7ad..56544217 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); 
tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index e6db532c..b7ab7e6f 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ 
import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index 65d9fe55..a26c7762 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + 
primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; 
+ + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py index 803b21dd..4fd4caa6 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return (None, add_7, None, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", tangents_1: "f32[s97]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, 
None, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py index ced18d7f..bf43a539 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_2, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", primals_3: "Sym(s52)", primals_4: "f32[s52]"): + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_2, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py index 64287d39..6fcde534 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in 
toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return 
(lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py index 64287d39..6fcde534 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps 
Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 845a00f9..33b3ee70 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1338952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fe05360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b42f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ff9d5a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. 
at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1338952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fe05360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b42f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ff9d5a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. 
at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133b41870>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ff795a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323191488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324260528) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4325590336) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133839900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fc19870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132ee9630>''' +___dict_contains = '''. 
at 0x16fc197e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323191488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324260528) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4325590336) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 9ea82861..3d153432 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1498952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x309095360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
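The hunk that follows shows the calling-convention change that repeats throughout this patch: instead of star-unpacking the compiled function's tuple result (`__temp_7, = __compiled_fn_5(x, b)`), the regenerated code binds the tuple to `graph_out_0` and indexes it. A tiny equivalence check with a hypothetical stand-in callable:

# Hypothetical stand-in for a compiled graph that returns a 1-tuple.
def compiled(x, b):
    return (x * b,)

# Old style in the previous expectation files: starred unpacking.
out, = compiled(2, 3)

# New style in the regenerated files: bind the tuple, then index it.
graph_out_0 = compiled(2, 3)
assert out == graph_out_0[0] == 6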
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a4af80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30922dd80>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. 
at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1498952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x309095360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a4af80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30922dd80>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. 
at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149a49870>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3091dbac0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359334592) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4402075648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332435664) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4333748528) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149835900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30902d870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1487e5630>''' +___dict_contains = '''. 
at 0x30902d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359334592) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4402075648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332435664) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4333748528) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 65688880..e4bdceac 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15eb0d360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc42f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15eda1870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. 
at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15eb0d360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc42f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15eda1870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. 
at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dc41870>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ed53b50>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332678144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4397882384) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395645616) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4396975184) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13da39900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15e909870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13d9e5630>''' +___dict_contains = '''. 
at 0x15e9097e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332678144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4397882384) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395645616) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396975184) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
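The hunks in this file track two generator-side changes: the captured graphs now return the branch condition before the payload, i.e. (lt, x) instead of (x, lt), so the decompiled wrappers index graph_out_0 rather than tuple-unpacking, and the duplicated check_no_aliasing guard is emitted only once. A minimal, self-contained sketch of the new calling convention; the names compiled_graph and transformed_toy_function are illustrative and not part of the generated files:

import torch

def compiled_graph(a, b):
    # Mirrors the new output order of __compiled_fn_1: (lt, x) instead of (x, lt).
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (lt, x)

def transformed_toy_function(a, b):
    graph_out_0 = compiled_graph(a, b)
    x = graph_out_0[1]        # payload moved to index 1
    if graph_out_0[0]:        # branch condition now comes first
        return x * (b * -1)   # stand-in for __resume_at_30_2(b, x)
    return x * b              # stand-in for __resume_at_38_3(b, x)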
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 
1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py index 09af1230..f2157c06 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py @@ 
-1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py index 09af1230..f2157c06 100644 --- 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py index a4ee2c65..20954b7b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 
1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py index a4ee2c65..20954b7b 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = 
torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- 
a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index ac8d9fb8..88e32da4 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbaef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14f9b5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
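Beyond the pointer and line-number churn, the guard bodies in these full_code files change in one substantive way: the duplicated check_no_aliasing(L['b'], L['x']) guard is now emitted once. A rough, self-contained sketch of what such a guard evaluates, with simplified stand-ins for the real check_tensor and check_no_aliasing helpers (which also verify dispatch keys, device and strides):

import torch

def check_tensor(t, dtype, requires_grad, size):
    # Simplified stand-in: the real guard also checks dispatch keys, device and strides.
    return (isinstance(t, torch.Tensor)
            and t.dtype == dtype
            and t.requires_grad == requires_grad
            and list(t.shape) == size)

def check_no_aliasing(*tensors):
    # Simplified stand-in: the real guard compares the underlying storages.
    return len({t.untyped_storage().data_ptr() for t in tensors}) == len(tensors)

def guard_for_resume(L):
    ok = True
    ok = ok and check_tensor(L['b'], torch.float32, False, [10])
    ok = ok and check_tensor(L['x'], torch.float32, False, [10])
    ok = ok and check_no_aliasing(L['b'], L['x'])  # emitted once in the updated output
    return ok

print(guard_for_resume({'b': torch.randn(10), 'x': torch.randn(10)}))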
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2950>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fc00af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. 
at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
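The __compiled_fn_7 and __compiled_fn_11 graphs and the size guards above come from the second, dynamic-shape compilation: once an input length changes, Dynamo recompiles with symbolic sizes (now named s52/s97 rather than s0/s1) and the transformed code passes call_size(b, 0) explicitly. A sketch of roughly how fixtures like the ones in this directory are produced; the dump directory name is illustrative and the real harness in tests/test_pytorch/test_pytorch.py may differ:

import torch
import depyf

def toy_function(a, b):
    # Matches the user code cited in the graph comments
    # (tests/test_pytorch/test_pytorch.py:4-7).
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

opt_fn = torch.compile(toy_function, backend="aot_eager")

with depyf.prepare_debug("./depyf_dump"):
    opt_fn(torch.randn(10), torch.randn(10))  # static graphs (__compiled_fn_1, __compiled_fn_5)
    opt_fn(torch.randn(8), torch.randn(8))    # size change triggers the dynamic-shape recompile (__compiled_fn_7, __compiled_fn_11)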
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbaef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14f9b5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
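For orientation, each guard/transformed pair in full_code_for_toy_function_*.py is consumed by a transformed___resume_at_* (or transformed toy_function) dispatcher that tries the guards in order and falls back to eager execution when none hits. A condensed, self-contained paraphrase of that dispatch shape, using stand-in functions rather than the generated code:

import torch

def guard_hit(L):
    # Stand-in for __guard_0_for_torch_dynamo_resume_in_toy_function_at_5.
    return L["b"].shape == (10,) and L["x"].shape == (10,) and not L["b"].requires_grad

def compiled_resume(b, x):
    # Stand-in for __transformed_code_0_..., which forwards to __compiled_fn_5(x, b).
    return x * b

def eager_resume(b, x):
    # Fallback: the original resume bytecode, executed eagerly.
    return x * b

def transformed_resume(b, x):
    L = {"b": b, "x": x}
    if guard_hit(L):
        return compiled_resume(b, x)
    return eager_resume(b, x)

print(transformed_resume(torch.randn(10), torch.randn(10)))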
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2950>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fc00af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. 
at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
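The 2 <= L['a'].size()[0] and 2 <= L['b'].size()[0] guards are the framework's 0/1 specialization: the dynamic-shape graphs only cover lengths of at least 2, so length-0 and length-1 inputs would recompile. The guard comments point at the escape hatch; a hedged usage sketch follows (hypothetical standalone usage, not something the depyf tests do, and behavior may vary across torch versions):

import torch

a = torch.randn(10)

# Per the guard comment above: marking the dimension unbacked before the compiled
# call asks the compiler not to bake the 0/1 specialization into the guards.
torch._dynamo.mark_unbacked(a, 0)

compiled = torch.compile(lambda t: t / (torch.abs(t) + 1), backend="aot_eager")
print(compiled(a))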
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed1120>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fb9a830>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358187712) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321950224) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4323279392) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122bbadd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149d85480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122b36950>''' +___dict_contains = '''. 
at 0x149d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358187712) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321950224) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323279392) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 077690ef..09f903b5 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1baef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x308ab5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
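# Note: a rough, illustrative re-implementation (not Dynamo's or depyf's actual code)
# of what __guard_0_for_torch_dynamo_resume_in_toy_function_at_5 above asserts before
# its cached compiled code is reused: float32 CPU tensors of size (10,) and stride
# (1,), no grad, and no aliasing between b and x. Global-state and torch-function-mode
# checks are omitted here. The hunk itself only drops a duplicated check_no_aliasing
# line from the old fixture.
import torch

def toy_guard_0(L):
    b, x = L["b"], L["x"]
    ok = isinstance(b, torch.Tensor) and b.dtype == torch.float32 and b.device.type == "cpu"
    ok = ok and tuple(b.size()) == (10,) and tuple(b.stride()) == (1,) and not b.requires_grad
    ok = ok and isinstance(x, torch.Tensor) and x.dtype == torch.float32 and x.device.type == "cpu"
    ok = ok and tuple(x.size()) == (10,) and tuple(x.stride()) == (1,) and not x.requires_grad
    ok = ok and b.data_ptr() != x.data_ptr()  # crude stand-in for check_no_aliasing
    return ok

print(toy_guard_0({"b": torch.randn(10), "x": torch.randn(10)}))  # True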
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3d2440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16d2b8af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. 
at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
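# Note: the user code these fixtures decompile, reconstructed from the source lines
# quoted in the guard comments (tests/test_pytorch/test_pytorch.py:4-7). The driver
# below is an assumption for illustration only: a first call with length-10 inputs
# produces the size-[10]-specialized entry (guard 0), and a call with a different
# length recompiles with symbolic sizes (guard 1, the "2 <= ..." guards).
import torch

def toy_function(a, b):
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

compiled = torch.compile(toy_function, backend="aot_eager")
compiled(torch.randn(10), torch.randn(10))  # specialized entry: size=[10]
compiled(torch.randn(8), torch.randn(8))    # recompiled entry: dynamic size, 2 <= size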
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1baef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x308ab5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
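# Note: a schematic (with stubbed-out guards and bodies, not the generated code) of
# how the transformed_* wrappers whose guard blocks this hunk edits dispatch: each
# cached entry is tried via its guard, and the wrapper falls back to the original
# bytecode (which may trigger a fresh compilation) when no guard hits.
import torch

def guard_0(L):   # stand-in for __guard_0_...: specialized to size (10,)
    return tuple(L["b"].size()) == (10,) and tuple(L["x"].size()) == (10,)

def guard_1(L):   # stand-in for __guard_1_...: symbolic size, with x and b matching
    return L["b"].size(0) >= 2 and L["x"].size(0) == L["b"].size(0)

def code_0(b, x):  # stand-in for __transformed_code_0_... (calls __compiled_fn_5)
    return x * b

def code_1(b, x):  # stand-in for __transformed_code_1_... (calls __compiled_fn_11)
    return x * b

def transformed_resume(b, x):
    L = {"b": b, "x": x}
    if guard_0(L):
        return code_0(b, x)
    if guard_1(L):
        return code_1(b, x)
    return x * b  # original-code fallback in this toy version

print(transformed_resume(torch.randn(10), torch.randn(10)).shape)  # torch.Size([10])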
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3d2440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16d2b8af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. 
at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
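# Note: __import_torch_dot__dynamo_dot_utils is depyf's flattened name for the
# torch._dynamo.utils module, so call_size(b, 0) in the transformed code above
# resolves to torch._dynamo.utils.call_size. As used in these fixtures it supplies
# the dim-0 size that __compiled_fn_11 takes as its leading argument; for a plain
# tensor it is assumed to agree with b.size(0) (a quick interactive check, not an
# API guarantee).
import torch
from torch._dynamo import utils as dynamo_utils

b = torch.randn(6)
print(dynamo_utils.call_size(b, 0), b.size(0))  # expected: 6 6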
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1f1510>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16d2593f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390972096) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394457088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363926192) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4426040080) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f1badd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fe85480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f136950>''' +___dict_contains = '''. 
at 0x16fe85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390972096) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394457088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363926192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4426040080) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 22909954..1f7b2850 100644 --- a/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbaef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1543b5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1333d6440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1546fcaf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. 
at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbaef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1543b5090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1333d6440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1546fcaf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. 
at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
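# Note: the "2 <= L['b'].size()[0]" guards above come from 0/1 specialization: once a
# dimension becomes symbolic, sizes 0 and 1 remain special cases, so the recompiled
# entry only covers sizes >= 2. The guard message itself points at
# torch._dynamo.mark_unbacked(tensor, dim); the sketch below uses the related
# torch._dynamo.mark_dynamic, which declares a dimension dynamic up front so the
# first compilation is already size-generic (illustrative only, not part of the tests).
import torch

a, b = torch.randn(10), torch.randn(10)
torch._dynamo.mark_dynamic(a, 0)  # treat dim 0 symbolically from the first compile
torch._dynamo.mark_dynamic(b, 0)
# compiling toy_function (see the reconstruction earlier) with these inputs would then
# skip the size-10 specialization and go straight to the symbolic-shape entry.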
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131df1120>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15469b370>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563556992) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391401920) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4392714624) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131dbadd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147f85480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x131d36950>''' +___dict_contains = '''. 
at 0x147f85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357549056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4563556992) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391401920) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392714624) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 75ed5eb7..78571ac9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- 
a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 9d97d16d..c3aac4e0 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); 
primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 71225da5..0e6b7e72 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = 
torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 71225da5..0e6b7e72 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, 
primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index e6db532c..b7ab7e6f 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index 65d9fe55..a26c7762 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = 
L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this 
line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index a58c5141..d2ccf4d9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ed952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x309609360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11ece1630>''' +___dict_contains = '''. 
at 0x30948d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ed39900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30948d870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11ece1630>''' +___dict_contains = '''. at 0x30948d7e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323994544) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4405222576) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322425520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323951696) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 9404d3d1..f56e6470 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d5952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x310ef9360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d4e9630>''' +___dict_contains = '''. 
at 0x310e917e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d535900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x310e91870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d4e9630>''' +___dict_contains = '''. at 0x310e917e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364987072) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4430387200) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330830112) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375708432) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 53e5d2ed..2c8c33f7 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14e9952d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30580d360>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1432f5630>''' +___dict_contains = '''. 
at 0x3057a17e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14e939900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3057a1870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1432f5630>''' +___dict_contains = '''. at 0x3057a17e0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395985600) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4717697024) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4399790768) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4401120336) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device 
+class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, 
code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No 
newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py 
b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 6ae8ad17..4ac311bb 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1232baef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147dad090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12323a950>''' +___dict_contains = '''. at 0x147985480>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
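The transformed-code hunks below switch from tuple-unpacking the compiled graph's outputs into `__temp_*` locals to keeping the whole result and indexing into it, with the graphs returning the branch condition first (`return (lt, x)` instead of `return (x, lt)`). The following is a small sketch of that calling convention, using hypothetical stand-ins for the generated `__compiled_fn_1` and the two resume functions, assuming the same toy inputs as the tests.

import torch

def compiled_fn_1(a, b):
    # Stand-in for the generated __compiled_fn_1: the branch condition comes
    # first in the output tuple, followed by the tensor bound to `x`.
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (lt, x)

def resume_if_negative(b, x):   # plays the role of __resume_at_30_2
    return x * (b * -1)

def resume_otherwise(b, x):     # plays the role of __resume_at_38_3
    return x * b

def transformed_toy_function(a, b):
    # Mirrors the new __transformed_code_0_for_toy_function: keep the whole
    # output tuple and index into it instead of unpacking into temporaries.
    graph_out_0 = compiled_fn_1(a, b)
    x = graph_out_0[1]
    if graph_out_0[0]:
        return resume_if_negative(b, x)
    return resume_otherwise(b, x)

a, b = torch.randn(10), torch.randn(10)
expected = a / (torch.abs(a) + 1) * (b if b.sum() >= 0 else b * -1)
assert torch.allclose(transformed_toy_function(a, b), expected)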
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1232badd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147d03d00>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12323a950>''' +___dict_contains = '''. 
at 0x147985480>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363840192) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4585576448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325489088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326801792) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index db4b5300..6de8ecd4 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128cbaef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16cab1090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x128c3a950>''' +___dict_contains = '''. at 0x16c685630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
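Besides tensor properties, these guards call `check_no_aliasing(L['b'], L['x'])` to reject calls where the arguments alias each other, since the traced code assumes distinct inputs. The sketch below captures only the core idea using public tensor and storage APIs; the real check is more precise about partially overlapping views, so treat it as an illustration, not the guard's implementation.

import torch

def no_aliasing(*tensors):
    # Rough analog of the generated check_no_aliasing guard: require that no
    # two arguments are backed by the same storage buffer.  Real aliasing
    # analysis also handles partially overlapping regions.
    ptrs = [t.untyped_storage().data_ptr() for t in tensors]
    return len(ptrs) == len(set(ptrs))

a = torch.randn(10)
b = torch.randn(10)
view = a[2:5]                      # a view sharing a's storage

assert no_aliasing(a, b)           # distinct buffers: guard would pass
assert not no_aliasing(a, view)    # aliasing inputs: guard would fail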
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128cbadd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c685480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x128c3a950>''' +___dict_contains = '''. 
at 0x16c685630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4351027904) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4354512896) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4340824832) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4421845696) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index a0d6ddce..d735c6fd 100644 --- a/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_aot_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12a4baef0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1512b1090>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12a436950>''' +___dict_contains = '''. at 0x150d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12a4badd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150d85480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12a436950>''' +___dict_contains = '''. 
at 0x150d85630>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332350384) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4425145280) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329208096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4330521200) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
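The integer literals in `___check_obj_id(G['torch'], ...)` and `___check_obj_id(G['torch'].abs, ...)` are object ids captured when the frame was compiled, so they differ from run to run and machine to machine. A small sketch of the idea follows, with a hypothetical `check_obj_id` helper standing in for the guard builtin.

import torch

def check_obj_id(obj, expected_id):
    # Hypothetical analog of the ___check_obj_id guard: compare the object's
    # identity against an id recorded when the frame was compiled.
    return id(obj) == expected_id

# Recorded at "compile" time; the concrete numbers carry no meaning across runs.
recorded_torch_id = id(torch)
recorded_abs_id = id(torch.abs)

assert check_obj_id(torch, recorded_torch_id)
assert check_obj_id(torch.abs, recorded_abs_id)
assert not check_obj_id(torch.nn, recorded_torch_id)  # a different object fails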
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - 
return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py 
b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / 
add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
- call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 139847d6..c6329770 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a768550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14746edd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. 
at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b15f7f0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a041510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a768550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14746edd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. 
at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b15f7f0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a041510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a7379a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147587640>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. 
at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384337056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4454505648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386093104) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387406048) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a736a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147585c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a6169e0>''' +___dict_contains = '''. at 0x147585ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384337056) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4454505648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386093104) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387406048) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index b52ad084..269e3ad7 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118868550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ec6edd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. 
at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118a5bd90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f03d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118868550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ec6edd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. 
at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118a5bd90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f03d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1188339a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ed86560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. 
at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352404160) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4419901440) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356110224) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4357620176) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118832a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13ed85c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff169e0>''' +___dict_contains = '''. at 0x13ed85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352404160) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4419901440) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356110224) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4357620176) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 7b41c9f9..831578b9 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f368550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17516add0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. 
at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f55aa70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17573d480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f368550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17516add0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. 
at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f55aa70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17573d480>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f3339a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x175286560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. 
at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331858864) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392409888) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330813968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404019744) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f332a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x175285c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10f1169e0>''' +___dict_contains = '''. at 0x175285ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331858864) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392409888) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330813968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4404019744) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch 
import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: 
"f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = 
a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
- call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 4334a22a..098fb2eb 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc64790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x159cd8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. 
at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc33490>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a0291b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc64790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x159cd8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. 
at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc33490>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a0291b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fe5bf40>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x159c87a30>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. 
at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331170496) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375861248) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387552000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388881248) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fc32a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x159c85cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb169e0>''' +___dict_contains = '''. at 0x159c85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331170496) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375861248) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387552000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388881248) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index a774a33e..ea4b1ccc 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c868790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157ed8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. 
at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c833490>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168129120>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c868790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157ed8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. 
at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c833490>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168129120>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10ca5bf40>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157e86950>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. 
at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352551616) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356036448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4314232880) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321804832) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10c832a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157e85cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10bf169e0>''' +___dict_contains = '''. at 0x157e85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4352551616) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356036448) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4314232880) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321804832) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index bbc840ad..dac0802b 100644 --- a/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d64790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a5d8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. 
at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d33130>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a828f70>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d64790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a5d8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. 
at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d33130>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a828f70>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d339a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a586950>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. 
at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4839332720) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395629232) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396958800) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132d32a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a585cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x132c169e0>''' +___dict_contains = '''. at 0x15a585b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4839332720) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4395629232) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396958800) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch 
import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py 
b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 64ff14d2..e506881a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13296c550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14d76edd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1328169e0>''' +___dict_contains = '''. at 0x16a081ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
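
Every `__guard_*` function in these files follows the same accumulation pattern: start from `__guard_hit = True`, `and` in one check per guard (ambient device, global state, tensor properties, aliasing, and, in the dynamic-shape variants, symbolic size relations), and return the result, so the first failing check short-circuits the rest. A minimal runnable sketch of that pattern follows; `check_size`, `check_no_aliasing`, and `guard_0_for_toy_function` below are illustrative stand-ins, not the helpers Dynamo actually injects.

import torch

# Hypothetical stand-ins for the generated helpers (check_tensor, check_no_aliasing,
# hasattr guards); the real ones are injected into the guard's globals at runtime.
def check_size(t: torch.Tensor, size) -> bool:
    return list(t.size()) == list(size)

def check_no_aliasing(*tensors: torch.Tensor) -> bool:
    # Crude stand-in: treat tensors as aliasing if they share a storage pointer.
    return len({t.data_ptr() for t in tensors}) == len(tensors)

def guard_0_for_toy_function(L: dict) -> bool:
    # Same accumulation shape as __guard_0_for_toy_function above: every check is
    # and-ed into guard_hit, so the first failing guard short-circuits the rest.
    guard_hit = True
    guard_hit = guard_hit and check_size(L['a'], [10])
    guard_hit = guard_hit and check_size(L['b'], [10])
    guard_hit = guard_hit and hasattr(L['a'], '_dynamo_dynamic_indices') == False
    guard_hit = guard_hit and check_no_aliasing(L['a'], L['b'])
    return guard_hit

L = {'a': torch.randn(10), 'b': torch.randn(10)}
print(guard_0_for_toy_function(L))                                   # True
print(guard_0_for_toy_function({'a': torch.randn(3), 'b': L['b']}))  # False: size guard fails
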
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x132932a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a081c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1328169e0>''' +___dict_contains = '''. 
at 0x16a081ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4357728960) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4406204256) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356995520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358309024) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index f35bb144..2ec9409e 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128068550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x308d66dd0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff129e0>''' +___dict_contains = '''. at 0x308e85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
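
The `__transformed_code_*` hunks in this patch all make the same two changes: the captured graph for `toy_function` now returns `(lt, x)` instead of `(x, lt)`, and the decompiled code keeps the whole graph output as `graph_out_0` and indexes it instead of tuple-unpacking into `__temp_*` locals (the dynamic-shape variants additionally pass `call_size(a, 0)` / `call_size(b, 0)` as extra leading arguments to the compiled function). Both conventions compute the same thing; a small sketch, using hypothetical `compiled_fn_1_old` / `compiled_fn_1_new` stand-ins rather than real depyf output:

import torch

# Illustrative stand-ins for __compiled_fn_1; the bodies mirror the captured graph
# for toy_function (x = a / (torch.abs(a) + 1); branch on b.sum() < 0).
def compiled_fn_1_old(a, b):
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (x, lt)          # old output order: (x, lt)

def compiled_fn_1_new(a, b):
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (lt, x)          # new output order: (lt, x)

a, b = torch.randn(10), torch.randn(10)

# Old transformed code: unpack the outputs into temporaries.
temp_2, temp_3 = compiled_fn_1_old(a, b)
x_old, branch_old = temp_2, bool(temp_3)

# New transformed code: keep the whole result and index it.
graph_out_0 = compiled_fn_1_new(a, b)
x_new, branch_new = graph_out_0[1], bool(graph_out_0[0])

assert torch.equal(x_old, x_new) and branch_old == branch_new
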
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128032a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x308e85c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x10ff129e0>''' +___dict_contains = '''. 
at 0x308e85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4316769200) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375633232) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4394317952) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4472177424) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 7e285c29..69f90597 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13386c550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x164b6add0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13371a9e0>''' +___dict_contains = '''. at 0x164c85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133832a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x164c85c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x13371a9e0>''' +___dict_contains = '''. 
at 0x164c85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364643008) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4368128000) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4394695424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396221440) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch 
import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 46738d0e..712ef02a 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120f6c790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15e7d8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120e169e0>''' +___dict_contains = '''. at 0x15e781b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
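
The `Captured_Graph` and `pre_insert_deferred_runtime_asserts` dumps in this patch change shape: instead of a bare `forward(self, ...)` function with no imports, each file is now a complete `class GraphModule(torch.nn.Module)` preceded by `import torch` and `from torch import device`, so it can be read and executed on its own. A sketch of what the updated `__compiled_fn_1` dump boils down to, with an illustrative driver that is not part of the generated file:

from __future__ import annotations
import torch
from torch import device  # the new dumps import this explicitly

# Mirrors the new Captured_Graph format shown in the hunks above: a self-contained
# GraphModule class rather than a bare forward() function.
class GraphModule(torch.nn.Module):
    def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"):
        l_a_ = L_a_
        l_b_ = L_b_

        # x = a / (torch.abs(a) + 1)
        abs_1 = torch.abs(l_a_)
        add = abs_1 + 1
        x = l_a_ / add

        # if b.sum() < 0:
        sum_1 = l_b_.sum()
        lt = sum_1 < 0
        return (lt, x)  # branch condition first, then x

# Illustrative driver (not generated by depyf).
gm = GraphModule()
lt, x = gm(torch.randn(10), torch.randn(10))
print(bool(lt), tuple(x.shape))
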
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120f36a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15e781cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120e169e0>''' +___dict_contains = '''. 
at 0x15e781b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332825280) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4348598112) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4382161344) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4600103536) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 9faad93a..4edf360e 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d46c790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15abd8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d3169e0>''' +___dict_contains = '''. at 0x15ab85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
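Note on the regenerated transformed code: the hunk below (and its analogues for full_code_for_toy_function_0.py and full_code_for_toy_function_2.py) replaces tuple unpacking of the compiled graph's return value with a single graph_out_0 binding that is indexed explicitly. A minimal sketch of the two equivalent decompilation styles, assuming a compiled callable that returns a one-element tuple; fake_compiled_fn and its body are invented stand-ins for illustration and are not part of the fixtures:

    import torch

    def fake_compiled_fn(x: torch.Tensor, b: torch.Tensor):
        # Stand-in for __compiled_fn_5: graph outputs always come back as a tuple.
        return (x * (b * -1),)

    def old_style(b, x):
        __temp_7, = fake_compiled_fn(x, b)    # previous fixtures: unpack the 1-tuple
        return __temp_7

    def new_style(b, x):
        graph_out_0 = fake_compiled_fn(x, b)  # regenerated fixtures: keep the tuple
        return graph_out_0[0]                 # and index into it

Both forms return the same tensor; only the decompiled surface syntax differs.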
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d436a70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ab85cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d3169e0>''' +___dict_contains = '''. 
at 0x15ab85b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4404898816) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4709308976) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4351342480) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4352655424) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index 3e835984..6922fc78 100644 --- a/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_eager_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a36c790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1683d8160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a21a9e0>''' +___dict_contains = '''. at 0x168385b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
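The guard hunks for torch_dynamo_resume_in_toy_function_at_5 above also drop a duplicated check_no_aliasing(L['b'], L['x']) line, leaving one aliasing check and one tensor-property check per input. Conceptually, a guard of this shape only lets the cached compiled code run when the current inputs still look like the ones seen at compile time. A rough hand-written approximation in plain PyTorch, purely for illustration (this is not Dynamo's actual guard machinery, and the data_ptr comparison below is a crude stand-in for the real overlap check):

    import torch

    def rough_guard(b: torch.Tensor, x: torch.Tensor) -> bool:
        ok = True
        for t in (b, x):
            ok = ok and isinstance(t, torch.Tensor)
            ok = ok and t.dtype == torch.float32
            ok = ok and not t.requires_grad
            ok = ok and tuple(t.size()) == (10,) and tuple(t.stride()) == (1,)
        # "check_no_aliasing": the two inputs must not share the same memory.
        ok = ok and b.data_ptr() != x.data_ptr()
        return ok

    b, x = torch.randn(10), torch.randn(10)
    assert rough_guard(b, x)        # fresh, independent tensors pass
    assert not rough_guard(b, b)    # an aliased pair fails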
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11a33aa70>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168385cf0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11a21a9e0>''' +___dict_contains = '''. 
at 0x168385b40>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364380864) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4438775808) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324752128) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326081696) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
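The ___check_obj_id guards above pin G['torch'] and G['torch'].abs to concrete object identities; the literal integers are just id() values from the interpreter that generated the fixtures, which is why they differ in every regenerated file. A small sketch of the idea; check_obj_id here is a hypothetical helper written for illustration, not something imported from torch:

    import torch

    def check_obj_id(obj, expected_id: int) -> bool:
        # Identity guard: the guarded global must still be the exact same object.
        return id(obj) == expected_id

    # Recorded once when the guard is built ...
    recorded_torch_id = id(torch)
    recorded_abs_id = id(torch.abs)

    # ... and re-checked on every call.
    assert check_obj_id(torch, recorded_torch_id)
    assert check_obj_id(torch.abs, recorded_abs_id)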
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..50380593 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 2d7f4057..ec98b14e 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = 
torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 9d97d16d..c3aac4e0 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); 
abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index defa2f56..673a3b40 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) 
- add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py index f84427fe..9cae4c04 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; 
at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -99,7 +115,7 @@ def call(args): cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, ) + return (buf2, buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index defa2f56..673a3b40 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / 
(torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py index 9595a545..9ccce268 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - 
mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None - return (mul_2, primals_2, primals_3, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py index 496fa11b..8474b561 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.AFTER_POST_GRAD.1.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return (None, mul_6, mul_5) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]", tangents_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in 
torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..96da7ae6 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,15 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py index 496fa11b..8474b561 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return (None, mul_6, mul_5) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]", tangents_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = 
torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py index 9595a545..9ccce268 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None - return (mul_2, primals_2, primals_3, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def 
forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py index 343df7ad..56544217 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return 
pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py index 615dbd6f..40d5c848 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,32 +30,37 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -66,12 +73,12 @@ def call(args): primals_1, primals_2, primals_3 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_3, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(primals_3, primals_2, buf0, s0) - return (buf0, primals_2, primals_3, s0, ) + s52 = primals_1 + assert_size_stride(primals_2, (s52, ), (1, )) + assert_size_stride(primals_3, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + cpp_fused_mul_0(primals_3, primals_2, buf0, s52) + return (buf0, primals_2, primals_3, s52, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py index b0beb052..8b0822f6 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu 
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, const float* in_ptr2, @@ -37,33 +39,38 @@ const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - auto tmp7 = tmp0 * tmp6; - auto tmp8 = tmp7 * tmp3; - tmp5.store(out_ptr0 + static_cast(x0)); - tmp8.store(out_ptr1 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - auto tmp7 = tmp0 * tmp6; - auto tmp8 = tmp7 * tmp3; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0)); + tmp8.store(out_ptr1 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + 
((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -76,13 +83,13 @@ def call(args): primals_1, primals_2, primals_3, tangents_1 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_3, (s0, ), (1, )) - assert_size_stride(tangents_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - buf1 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s0) + s52 = primals_1 + assert_size_stride(primals_2, (s52, ), (1, )) + assert_size_stride(primals_3, (s52, ), (1, )) + assert_size_stride(tangents_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + buf1 = empty_strided_cpu((s52, ), (1, ), torch.float32) + cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s52) del primals_2 del primals_3 del tangents_1 diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) 
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py
index 343df7ad..56544217 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py
@@ -1,20 +1,20 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals, tangents):
-    primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]";
-
-    primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1
-    mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None
+import torch
+from torch import device
+class joint_helper(torch.nn.Module):
+    def forward(self, primals, tangents):
+        primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]";
 
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
-    mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul)
-    mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None
-    mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1
-    mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None
-    return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec)
-
\ No newline at end of file
+        primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1
+        mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+        mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul)
+        mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None
+        mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1
+        mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None
+        return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py
new file mode 100644
index 00000000..9a30bb15
--- /dev/null
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
+        l_x_ = L_x_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+        mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
+        return (mul,)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
index e6db532c..b7ab7e6f 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
@@ -1,10 +1,10 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
-    mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
-    mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
-    return (mul_2, mul_1)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+        mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
+        mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
+        return (mul_2, mul_1)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
index 47764cc5..9a30bb15 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
@@ -1,12 +1,12 @@
 from __future__ import annotations
-
-
-
-def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
-    l_x_ = L_x_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
-    mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
-    return (mul,)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
+        l_x_ = L_x_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b
+        mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
+        return (mul,)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
index 65d9fe55..a26c7762 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py index a2081e47..e4f328d7 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py +++ 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: 
return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py index ced18d7f..bf43a539 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = 
None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_2, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", primals_3: "Sym(s52)", primals_4: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_2, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py index 732a1dfd..03249a11 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.AFTER_POST_GRAD.1.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return (None, add_7, None, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", tangents_1: "f32[s97]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s97]" = 
torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sign: "f32[s97]" = torch.ops.aten.sign.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..74060e47 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py index 732a1dfd..03249a11 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = 
torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None
-    return (None, add_7, None, None)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", tangents_1: "f32[s97]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1)
+        neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1)
+        abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2)
+        add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
+        div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2)
+        div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None
+        mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None
+        div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None
+        sign: "f32[s97]" = torch.ops.aten.sign.default(primals_2); primals_2 = None
+        mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1)
+        add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None
+        return (None, add_7, None, None)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py
index 6deec089..74060e47 100644
--- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py
@@ -1,18 +1,18 @@
 from __future__ import annotations
-
-
-
-def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"):
-    l_a_ = L_a_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[s0]" = torch.abs(l_a_)
-    add: "f32[s0]" = abs_1 + 1; abs_1 = None
-    x: "f32[s0]" = l_a_ / add; l_a_ = add = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0:
-    sum_1: "f32[]" = l_b_.sum(); l_b_ = None
-    lt: "b8[]" = sum_1 < 0; sum_1 = None
-    return (x, lt)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"):
+        l_a_ = L_a_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[s97]" = torch.abs(l_a_)
+        add: "f32[s97]" = abs_1 + 1; abs_1 = None
+        x: "f32[s97]" = l_a_ / add; l_a_ = add = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0:
+        sum_1: "f32[]" = l_b_.sum(); l_b_ = None
+        lt: "b8[]" = sum_1 < 0; sum_1 = None
+        return (lt, x)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py
index
ced18d7f..bf43a539 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_2, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", primals_3: "Sym(s52)", primals_4: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_2, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py index ff9cdf79..e806ca85 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); 
primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sign: "f32[s97]" = torch.ops.aten.sign.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py index 643b9054..1418d119 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align 
@@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -41,46 +43,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + 
out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + } + { + for(int64_t x0=static_cast(0LL); x0(ks1); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))) && x0 < static_cast(ks1))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } + } } } } @@ -93,17 +109,17 @@ def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() - s0 = primals_1 - s1 = primals_3 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_4, (s1, ), (1, )) + s97 = primals_1 + s52 = primals_3 + assert_size_stride(primals_2, (s97, ), (1, )) + assert_size_stride(primals_4, (s52, ), (1, )) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s1, s0) + buf0 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s52, s97) del buf1 del primals_4 - return (buf0, buf2, primals_2, s0, ) + return (buf2, buf0, primals_2, s97, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py index 4df30593..06b0ce7f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = 
torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,64 +30,69 @@ cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp1.abs(); - auto tmp3 = static_cast(1.0); - auto tmp4 = at::vec::Vectorized(tmp3); - auto tmp5 = tmp2 + tmp4; - auto tmp6 = tmp0 / tmp5; - auto tmp7 = tmp0.neg(); - auto tmp8 = tmp1 / tmp5; - auto tmp9 = tmp8 / tmp5; - auto tmp10 = tmp7 * tmp9; - auto tmp11 = - [&]() { - auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); - auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); - return left - right; + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp1.abs(); + auto tmp3 = static_cast(1.0); + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = + [&]() + { + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); + return left - right; + } + () + ; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = tmp1.abs(); + auto tmp3 = static_cast(1.0); + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = + [&]() + { + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = 
decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); + return left - right; + } + () + ; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } } - () - ; - auto tmp12 = tmp10 * tmp11; - auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = tmp1.abs(); - auto tmp3 = static_cast(1.0); - auto tmp4 = at::vec::Vectorized(tmp3); - auto tmp5 = tmp2 + tmp4; - auto tmp6 = tmp0 / tmp5; - auto tmp7 = tmp0.neg(); - auto tmp8 = tmp1 / tmp5; - auto tmp9 = tmp8 / tmp5; - auto tmp10 = tmp7 * tmp9; - auto tmp11 = - [&]() - { - auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); - auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); - return left - right; - } - () - ; - auto tmp12 = tmp10 * tmp11; - auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -98,11 +105,11 @@ def call(args): primals_1, primals_2, tangents_1 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(tangents_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s0) + s97 = primals_1 + assert_size_stride(primals_2, (s97, ), (1, )) + assert_size_stride(tangents_1, (s97, ), (1, )) + buf0 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s97) del primals_2 del tangents_1 return (None, buf0, None, None, ) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py index ff9cdf79..e806ca85 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 
= mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sign: "f32[s97]" = torch.ops.aten.sign.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = 
__compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 6da6a82d..90ee61f4 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f8c3a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ad41fc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
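# The hunks below switch the transformed code from tuple-unpacking the compiled graph's
# outputs (e.g. `__temp_7, = __compiled_fn_5(x, b)`) to binding the whole result and
# indexing into it (`graph_out_0 = __compiled_fn_5(x, b)`; `return graph_out_0[0]`).
# For the multi-output graphs the output order also changes: `__compiled_fn_1` and
# `__compiled_fn_7` now return the branch flag first (see the `return (lt, x)` change
# earlier in this patch), so their callers read `x = graph_out_0[1]` and branch on
# `graph_out_0[0]`. A rough sketch of the equivalence, reusing names from these fixtures:
#     __temp_11, __temp_12 = __compiled_fn_7(...)    # old convention: x first, flag second
#     graph_out_0 = __compiled_fn_7(...)             # new convention: flag at [0], x at [1]
#     x, took_branch = graph_out_0[1], graph_out_0[0]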
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12368a320>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ec55b40>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. 
at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
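# For the dynamic-shape recompilation, the transformed code passes the symbolic size
# explicitly: `__import_torch_dot__dynamo_dot_utils.call_size(b, 0)` (i.e.
# `torch._dynamo.utils.call_size`) supplies the size of dim 0 that the graphs bind as
# symbolic ints (named `s97`/`s52` in the `__compiled_fn_7` graphs earlier in this patch).
# Several of the new `tmp_*` locals are assigned but never read; they presumably mirror
# evaluation-stack slots of the original bytecode. Roughly, the call above amounts to:
#     dim0 = torch._dynamo.utils.call_size(b, 0)   # symbolic size argument for the graph
#     graph_out_0 = __compiled_fn_11(dim0, b, x)
#     return graph_out_0[0]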
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f8c3a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ad41fc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12368a320>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ec55b40>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. 
at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12369e7a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ea98700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391529152) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395013984) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387928752) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4389454928) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122f640d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x158685870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122891a20>''' +___dict_contains = '''. 
at 0x158684a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391529152) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395013984) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387928752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4389454928) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py index 74acf1da..f84d7655 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5803a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f93dfc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
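# The guard functions in these files take a locals mapping `L` and a globals mapping `G`
# (e.g. `L['b']`, `G['torch']` above) and return True only if the cached compilation is
# still valid for the current inputs. A rough sketch of the dispatch these full_code
# fixtures implement (names beyond those visible here are illustrative, not verbatim):
#     __local_dict = {"b": b, "x": x}
#     __global_dict = globals()
#     if __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(__local_dict, __global_dict):
#         return __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x)
#     # otherwise fall back to the original bytecode (and possibly recompile)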
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb63b0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x305a124d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. 
at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5803a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f93dfc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
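# A minimal sketch of how dumps like these fixtures are produced, assuming the toy_function
# quoted in the inline "# File: ... test_pytorch.py" comments; the dump path and compile
# options are illustrative rather than copied from the test suite:
import torch
import depyf

def toy_function(a, b):
    x = a / (torch.abs(a) + 1)
    if b.sum() < 0:
        b = b * -1
    return x * b

compiled = torch.compile(toy_function, backend="inductor")

with depyf.prepare_debug("./depyf_output"):
    # The first call compiles for static size 10; a call with a different size then
    # triggers the dynamic-shape recompilation seen in __compiled_fn_7/__compiled_fn_11.
    compiled(torch.randn(10, requires_grad=True), torch.randn(10, requires_grad=True))
    compiled(torch.randn(8, requires_grad=True), torch.randn(8, requires_grad=True))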
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb63b0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x305a124d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. 
at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14cdb57e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x305a10700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331203264) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4334688256) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391712816) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4393025600) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14c5640d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x169e91870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14b791a20>''' +___dict_contains = '''. 
at 0x169e90a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4331203264) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4334688256) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391712816) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393025600) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py index 0c5c9ee2..861b14a5 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375883a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fcb9fc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
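Each full_code_for_toy_function_*.py fixture pairs a __guard_* function, evaluated against the frame's locals L and globals G, with a __transformed_code_* body that calls the matching __compiled_fn_* and unpacks its output tuple. The following is a minimal, self-contained sketch of that dispatch shape; the loop and helper names are illustrative, since depyf's real output hard-codes one guard/body pair per compiled variant.

import torch

def guard_static(L):
    # Stands in for __guard_0_*: hit only for the exact size seen at compile time.
    return L["a"].dtype == torch.float32 and L["a"].size(0) == 10

def guard_dynamic(L):
    # Stands in for __guard_1_*: the symbolic-shape variant, guarded by 2 <= size.
    return L["a"].dtype == torch.float32 and L["a"].size(0) >= 2

def transformed_code(a, b):
    # Stands in for __transformed_code_*: run the compiled graph (emulated here
    # in eager mode), unpack the returned tuple, and branch on the boolean slot.
    graph_out = (b.sum() < 0, a / (torch.abs(a) + 1))
    x = graph_out[1]
    if graph_out[0]:
        return x * (b * -1)
    return x * b

def dispatch(a, b, original_fn):
    L = {"a": a, "b": b}
    for guard in (guard_static, guard_dynamic):
        if guard(L):
            return transformed_code(a, b)
    return original_fn(a, b)  # no guard hit; Dynamo would recompile instead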
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137f14e50>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x315d364d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. 
at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375883a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fcb9fc0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
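The __resume_at_30_* / __resume_at_38_* functions exist because the data-dependent branch on b.sum() forces a graph break: Dynamo compiles the prologue up to the branch and then two continuation functions, one per outcome, each with its own guards and compiled graph (__compiled_fn_11 covers the negating branch, __compiled_fn_5 the fall-through). Roughly, the two continuations compute the following; this is reconstructed from the captured graphs, not copied from depyf's output.

import torch

def resume_when_negative(b, x):   # ~ __resume_at_30_*: taken when b.sum() < 0
    b = b * -1                    # test_pytorch.py:6
    return x * b                  # test_pytorch.py:7

def resume_fallthrough(b, x):     # ~ __resume_at_38_*: taken when b.sum() >= 0
    return x * b                  # test_pytorch.py:7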
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137f14e50>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x315d364d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. 
at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
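The 2 <= L['b'].size()[0] guards, and the hint about torch._dynamo.mark_unbacked(tensor, dim) in their comments, come from 0/1 specialization on the now-symbolic dimension. A related and more common way to avoid the size-specialized first compile is to mark the dimension dynamic up front; the small illustration below uses the public torch._dynamo.mark_dynamic API and is not part of this patch.

import torch

def scale(a, b):
    # Same shape-generic arithmetic as toy_function's first graph.
    return a / (torch.abs(a) + 1) * b

a = torch.randn(10, requires_grad=True)
b = torch.randn(10, requires_grad=True)

# Declare dim 0 dynamic before the first call, so Dynamo compiles the
# symbolic-shape (Sym(s...)) graph directly instead of specializing on 10
# and recompiling when a different length shows up.
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)

compiled = torch.compile(scale)
out = compiled(a, b)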
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x137e268c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x315d34700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323371712) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326856704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4380768464) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4388340416) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1375040d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c995870>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x136f91a20>''' +___dict_contains = '''. 
at 0x16c994a60>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4323371712) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326856704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4380768464) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388340416) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..50380593 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py deleted file mode 100644 index 21d4d18b..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py index 466582ab..d0b9eff2 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) 
&& x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -93,13 +109,13 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) + buf1 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf0, buf2, buf1) del arg0_1 del arg1_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ 
import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py deleted file mode 100644 index 21d4d18b..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py index 09af1230..f2157c06 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.AFTER_POST_GRAD.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: 
return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..96da7ae6 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,15 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index da5ddffd..96da7ae6 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: 
"f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py index 09af1230..f2157c06 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py index 4447a169..7d81efa0 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,32 +30,37 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const 
int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -66,11 +73,11 @@ def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() - s0 = arg0_1 - assert_size_stride(arg1_1, (s0, ), (1, )) - assert_size_stride(arg2_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(arg2_1, arg1_1, buf0, s0) + s52 = arg0_1 + assert_size_stride(arg1_1, (s52, ), (1, )) + assert_size_stride(arg2_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + cpp_fused_mul_0(arg2_1, arg1_1, buf0, s52) del arg1_1 del arg2_1 return (buf0, ) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 541c5b2b..85de6838 100644 --- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py index 09af1230..f2157c06 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py 
b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..9a30bb15 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py deleted file mode 100644 index c7d3fcde..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py index a17b3a1d..35feb7e1 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from 
torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py deleted file mode 100644 index c7d3fcde..00000000 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py index a4ee2c65..46a6ad8f 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.AFTER_POST_GRAD.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..74060e47 --- /dev/null 
+++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py index a4ee2c65..20954b7b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py index 326bb816..f21bf5a2 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -41,46 +43,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), 
static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + } + { + for(int64_t x0=static_cast(0LL); x0(ks1); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))) && x0 < static_cast(ks1))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } + } } } } @@ -93,17 +109,17 @@ def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() - s0 = arg0_1 - s1 = arg2_1 - assert_size_stride(arg1_1, (s0, ), (1, )) - assert_size_stride(arg3_1, (s1, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + s97 = arg0_1 + s52 = arg2_1 + assert_size_stride(arg1_1, (s97, ), (1, )) + assert_size_stride(arg3_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf1, buf2, buf0, s1, s0) + buf1 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf0, buf2, buf1, s52, s97) del arg1_1 del arg3_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 6deec089..74060e47 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; 
l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py index a4ee2c65..20954b7b 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- 
a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py index bc00f5f5..a3c62825 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
- call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py index 51445f0b..e7b305fa 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_toy_function.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 31ef0b2f..9c0d8598 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127b0add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151b9a200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. 
at 0x1274d9630>''' +___dict_contains = '''. at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1501fb5b0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1562d3370>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1274d9630>''' +___dict_contains = '''. at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x127b0add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151b9a200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1274d9630>''' +___dict_contains = '''. 
at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1501fb5b0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1562d3370>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1274d9630>''' +___dict_contains = '''. at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150039a20>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15309a050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1274d9630>''' +___dict_contains = '''. 
at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4376423664) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428291088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318362048) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4319674912) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1279eb910>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151b72560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1274d9630>''' +___dict_contains = '''. at 0x151b99ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4376423664) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428291088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318362048) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4319674912) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 78ba9a21..8c2f18b0 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12830add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c342200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. 
at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a95cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fa4f370>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12830add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c342200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. 
at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129a95cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fa4f370>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1299393f0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c59e050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. 
at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4421512192) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4456602160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390893776) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392403728) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1282eb910>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c31a560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fdd1630>''' +___dict_contains = '''. at 0x16c341ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4421512192) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4456602160) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4390893776) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4392403728) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py index d62c0611..8277be91 100644 --- a/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_with_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13890add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151c96200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. 
at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13914fac0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1554d7010>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13890add0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151c96200>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. 
at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13914fac0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1554d7010>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:6 in torch_dynamo_resume_in_toy_function_at_5 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x138908160>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15219e050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. 
at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4409993920) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4721891328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332009520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4333322304) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_toy_function(a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_toy_function(a, b): # Note: the following variables 
are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1388eb910>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x151c6e560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12fad1630>''' +___dict_contains = '''. at 0x151c95ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4409993920) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4721891328) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332009520) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4333322304) return __guard_hit # Note: please refer to the graph code in 
__compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py index 9d97d16d..c3aac4e0 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..50380593 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None 
+ lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 2d7f4057..ec98b14e 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, 
L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 9d97d16d..c3aac4e0 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py 
b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index defa2f56..673a3b40 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = 
torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py index f84427fe..9cae4c04 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - 
out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -99,7 +115,7 @@ def call(args): cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, ) + return (buf2, buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in 
toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index defa2f56..673a3b40 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: 
if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py index 65d9fe55..a26c7762 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..9a30bb15 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py 
b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index e6db532c..b7ab7e6f 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index 65d9fe55..a26c7762 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: 
return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py index a2081e47..e4f328d7 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = 
torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file 
diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 7854b589..a1a82cea 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py +++ 
b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py index 306a9c75..c6e760a5 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13a0c97e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ef89e10>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d252560>''' +___dict_contains = '''. 
at 0x168e81ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13a065000>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168e81c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d252560>''' +___dict_contains = '''. at 0x168e81ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397214400) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4400699392) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356225072) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4357537856) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py index e4cb4a15..17be2224 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1226cd5a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1686ed5a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d19b40>''' +___dict_contains = '''. 
at 0x154f85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122665000>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x154e2feb0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x116d19b40>''' +___dict_contains = '''. at 0x154f85ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4384386048) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4387888304) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4365073152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366599328) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py index c5c1f043..2893326f 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_with_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f4c9360>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16bedde10>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11ff924d0>''' +___dict_contains = '''. 
at 0x168595ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12f465000>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168595c60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11ff924d0>''' +___dict_contains = '''. at 0x168595ab0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325911472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329413808) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355291424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356604528) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py index 21d4d18b..479ab936 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.AFTER_POST_GRAD.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..50380593 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + 
return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = 
torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py index 466582ab..d0b9eff2 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - 
auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -93,13 +109,13 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) + buf1 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf0, buf2, buf1) del arg0_1 del arg1_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 2b323639..50380593 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return 
(x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 21d4d18b..46dd4198 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:4 in toy_function, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:5 in toy_function, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.AFTER_POST_GRAD.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..9a30bb15 --- /dev/null +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py index a17b3a1d..35feb7e1 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = 
at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 47764cc5..9a30bb15 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py index c7d3fcde..2c9959a9 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:7 in torch_dynamo_resume_in_toy_function_at_5, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py index 4c38db0a..1f9c0d4a 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py +++ 
b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py index fbff5ccd..3c4272b9 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_toy_function.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py index 0d050b57..c4e7e78c 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129275900>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a7b0a60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e2cd6c0>''' +___dict_contains = '''. 
at 0x16a785120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x129194430>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a785750>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e2cd6c0>''' +___dict_contains = '''. at 0x16a785120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4396493504) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4464990208) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321999376) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323525552) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py index 3001117e..d5b656cc 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12957cca0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3003b4a60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x128bcd510>''' +___dict_contains = '''. 
at 0x300389120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1294943a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x300389750>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x128bcd510>''' +___dict_contains = '''. at 0x300389120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4330106016) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4333608112) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4319951056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394582640) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py index e56786c1..5356cf40 100644 --- a/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py +++ b/tests/depyf_output/debug_function_inductor_without_dynamic_shape_without_grad/full_code_for_toy_function_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157d78ca0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15bab0a60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1572d9510>''' +___dict_contains = '''. 
at 0x15ba85120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_toy_function_at_5(L, G, **___kwargs_ign __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_toy_function_at_5(b, x): a = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157ca05e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15ba85750>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1572d9510>''' +___dict_contains = '''. at 0x15ba85120>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_toy_function(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4317178560) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4367472640) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325423792) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326753360) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
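The hunk that follows, together with the regenerated graph files further down, reflects the captured graph's new output order: __compiled_fn_1 now returns (lt, x) instead of (x, lt), so the transformed code reads the branch condition from graph_out_0[0] and the tensor from graph_out_0[1]. A short sketch of how that indexing lines up, assuming only the two-output convention shown in these fixtures (toy_compiled_fn and the resume_* parameters are hypothetical stand-ins):

import torch

def toy_compiled_fn(a, b):
    # Hypothetical stand-in for __compiled_fn_1: returns (lt, x), matching
    # the updated Captured_Graph output order in this patch.
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (lt, x)

def transformed_toy_function(a, b, resume_true, resume_false):
    # Mirrors __transformed_code_0_for_toy_function: index 0 carries the
    # branch condition, index 1 carries the tensor x.
    graph_out_0 = toy_compiled_fn(a, b)
    x = graph_out_0[1]
    if graph_out_0[0]:
        return resume_true(b, x)   # __resume_at_30_2 in the generated code
    return resume_false(b, x)      # __resume_at_38_3 in the generated code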
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_toy_function(a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 4f1b2ac2..4c46eab4 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- 
a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 94842116..f26b0341 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 
0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 84ed7d99..0c58023a 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = 
torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 84ed7d99..0c58023a 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in 
forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py index 
731e3483..dc39f504 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return (None, mul_6, mul_5) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]", tangents_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return (None, mul_6, mul_5) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in 
torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py index b19f551f..bfa5bb03 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None - return (mul_2, primals_2, primals_3, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s52)", primals_2: "f32[s52]", primals_3: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None + return (mul_2, primals_2, primals_3, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py index aec22424..ea271a5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = 
torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py index aec22424..ea271a5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s52)"; primals_2: "f32[s52]"; primals_3: "f32[s52]"; tangents_1: "f32[s52]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(primals_3, mul) + mul_4: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None + mul_5: "f32[s52]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul_6: "f32[s52]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None + return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index caa124fb..28960e9f 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index f0055de7..8171524f 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 
in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in 
torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py index 23637a4a..8c596a48 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: 
"f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return (None, add_7, None, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", tangents_1: "f32[s97]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return (None, add_7, None, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a 
/ (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py index ce83484b..badb66a2 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_2, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "Sym(s97)", primals_2: "f32[s97]", primals_3: "Sym(s52)", primals_4: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_2, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py index 5136219d..3251faf3 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - 
abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git 
a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py index 5136219d..3251faf3 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "Sym(s97)"; primals_2: "f32[s97]"; primals_3: "Sym(s52)"; primals_4: "f32[s52]"; tangents_1: "f32[s97]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in 
forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sgn: "f32[s0]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.ops.aten.abs.default(primals_2) + add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[s97]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[s97]" = torch.ops.aten.div.Tensor(primals_2, add_2) + div_2: "f32[s97]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None + mul_3: "f32[s97]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[s97]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None + sgn: "f32[s97]" = torch.ops.aten.sgn.default(primals_2); primals_2 = None + mul_4: "f32[s97]" = torch.ops.aten.mul.Tensor(mul_3, sgn); mul_3 = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_7: "f32[s97]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None + return pytree.tree_unflatten([lt, div, None, add_7, None, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = 
__compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index a1e40aaf..9536e462 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 3deb3869..d1534085 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
- call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index 2896bbee..4c58b16c 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17ca01510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. 
at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ea1a440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17cd68670>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17ca01510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. 
at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ea1a440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17cd68670>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e96f130>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17ca00160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. 
at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326304448) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329789280) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4320081968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4362076704) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e6631c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17c7232e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e5d7130>''' +___dict_contains = '''. at 0x17c723520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326304448) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4329789280) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4320081968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4362076704) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index c1c5fda2..333e5925 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x314795510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. 
at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15731e440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3149bc670>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1570a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x314795510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. 
at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15731e440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3149bc670>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157273130>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x314794160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. 
at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325485488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393688080) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363745968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365272144) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15705f1c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3146632e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x156fd3130>''' +___dict_contains = '''. at 0x314663520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325485488) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393688080) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363745968) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365272144) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
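Note (illustrative sketch, not part of the generated fixtures): the regenerated __transformed_code_* bodies no longer tuple-unpack the compiled graph's outputs; they keep the returned tuple as graph_out_0 and index into it, matching the reordered (lt, x) graph outputs shown further below. The helper names old_convention and new_convention and the compiled_fn parameter are hypothetical stand-ins for a Dynamo-compiled callable:

    def old_convention(compiled_fn, a, b):
        # earlier fixtures unpacked positionally; the graph returned (x, lt)
        x, lt = compiled_fn(a, b)
        return lt, x

    def new_convention(compiled_fn, a, b):
        # regenerated fixtures keep the tuple and index it; the graph returns (lt, x)
        graph_out_0 = compiled_fn(a, b)
        return graph_out_0[0], graph_out_0[1]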
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index cd2b25aa..c9cbe805 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130ea2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16cc3d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. 
at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13121e440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ce64af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130ea2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16cc3d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. 
at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
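Note (illustrative sketch, not depyf's real helpers): each __guard_* function receives the frame's locals L (and globals G) and returns True only while the cached compiled code is still valid for those values; the transformed_* wrappers use it to choose between the compiled fast path and the original bytecode. The names example_guard, example_transformed_resume, compiled_fn, and original_fn below are hypothetical:

    import torch

    def example_guard(L, G, **ignored):
        # plain-Python analogue of the tensor and shape checks in the hunks above
        ok = isinstance(L['b'], torch.Tensor) and L['b'].dtype == torch.float32
        ok = ok and isinstance(L['x'], torch.Tensor) and L['x'].dtype == torch.float32
        ok = ok and L['x'].size(0) == L['b'].size(0)  # dynamic-shape guard
        return ok

    def example_transformed_resume(b, x, compiled_fn, original_fn):
        L = {'b': b, 'x': x}
        if example_guard(L, globals()):
            return compiled_fn(x, b)   # guard hit: reuse the compiled graph
        return original_fn(b, x)       # guard miss: fall back (Dynamo would recompile)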
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13121e440>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16ce64af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x131173130>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16cc3c160>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. 
at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322306752) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4396890800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398220048) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x130e631c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16cb832e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x130dd7130>''' +___dict_contains = '''. at 0x16cb83520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322306752) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4338112352) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4396890800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398220048) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
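Note (illustrative, with a hypothetical module named Toy): the regenerated __compiled_fn_* files below are emitted as a class GraphModule(torch.nn.Module) listing with explicit import torch / from torch import device headers instead of a bare forward function. A listing of roughly that shape (node names and annotations will differ) can be produced from any traced module with print_readable:

    import torch
    import torch.fx

    class Toy(torch.nn.Module):
        def forward(self, a, b):
            x = a / (torch.abs(a) + 1)
            return b.sum() < 0, x

    gm = torch.fx.symbolic_trace(Toy())
    # print_readable(print_output=False) returns the class-style source string
    # that listings like the ones in these fixture files are based on.
    print(gm.print_readable(print_output=False))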
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py index b9545e37..5ca99091 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: 
"f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py index b9545e37..5ca99091 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = 
torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py index 2f386a3c..8522f76e 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", 
arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py index 2f386a3c..8522f76e 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ 
b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "Sym(s52)", arg1_1: "f32[s52]", arg2_1: "f32[s52]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + mul: "f32[s52]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_2: "f32[s52]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None + return (mul_2,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py index b036c79d..95da859f 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = 
arg1_1 = None
-    return (mul,)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
+        return (mul,)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
index 092bb929..3765b6ed 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py
@@ -1,12 +1,12 @@
 from __future__ import annotations
-
-
-
-def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
-    l_x_ = L_x_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
-    return (mul,)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
+        l_x_ = L_x_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
+        return (mul,)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py
index b036c79d..95da859f 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
-
-
-
-def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
-    return (mul,)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
+        return (mul,)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py
index 67c99a8b..14896cd1 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py
@@ -1,18 +1,18 @@
 from __future__ import annotations
-
-
-
-def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"):
-    l_a_ = L_a_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[s0]" = torch.abs(l_a_)
-    add: "f32[s0]" = abs_1 + 1; abs_1 = None
-    x: "f32[s0]" = l_a_ / add; l_a_ = add = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = l_b_.sum(); l_b_ = None
-    lt: "b8[]" = sum_1 < 0; sum_1 = None
-    return (x, lt)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"):
+        l_a_ = L_a_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[s97]" = torch.abs(l_a_)
+        add: "f32[s97]" = abs_1 + 1; abs_1 = None
+        x: "f32[s97]" = l_a_ / add; l_a_ = add = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
+        sum_1: "f32[]" = l_b_.sum(); l_b_ = None
+        lt: "b8[]" = sum_1 < 0; sum_1 = None
+        return (lt, x)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py
index 722d8681..fc2be8f8 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py
@@ -1,15 +1,15 @@
 from __future__ import annotations
-
-
-
-def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1)
-    add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
-    div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None
-    lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None
-    return (div, lt)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1)
+        add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
+        div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
+        sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None
+        lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None
+        return (lt, div)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py
index 67c99a8b..14896cd1 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py
@@ -1,18 +1,18 @@
 from __future__ import annotations
-
-
-
-def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"):
-    l_a_ = L_a_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[s0]" = torch.abs(l_a_)
-    add: "f32[s0]" = abs_1 + 1; abs_1 = None
-    x: "f32[s0]" = l_a_ / add; l_a_ = add = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = l_b_.sum(); l_b_ = None
-    lt: "b8[]" = sum_1 < 0; sum_1 = None
-    return (x, lt)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"):
+        l_a_ = L_a_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[s97]" = torch.abs(l_a_)
+        add: "f32[s97]" = abs_1 + 1; abs_1 = None
+        x: "f32[s97]" = l_a_ / add; l_a_ = add = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
+        sum_1: "f32[]" = l_b_.sum(); l_b_ = None
+        lt: "b8[]" = sum_1 < 0; sum_1 = None
+        return (lt, x)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py
index 722d8681..fc2be8f8 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py
@@ -1,15 +1,15 @@
 from __future__ import annotations
-
-
-
-def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1)
-    add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
-    div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None
-    lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None
-    return (div, lt)
-
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, arg0_1: "Sym(s97)", arg1_1: "f32[s97]", arg2_1: "Sym(s52)", arg3_1: "f32[s52]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[s97]" = torch.ops.aten.abs.default(arg1_1)
+        add_2: "f32[s97]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
+        div: "f32[s97]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
+        sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None
+        lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None
+        return (lt, div)
+
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py
index f60cd0e1..f9f299a2 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py
@@ -1,6 +1,8 @@
 def __transformed_code_0_for_forward(self, a, b):
-    __temp_2, __temp_3 = __compiled_fn_1(a, b)
-    x = __temp_2
-    if __temp_3:
+    tmp_1 = a
+    tmp_2 = b
+    graph_out_0 = __compiled_fn_1(a, b)
+    x = graph_out_0[1]
+    if graph_out_0[0]:
         return __resume_at_30_2(b, x)
     return __resume_at_38_3(b, x)
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py
index c167728a..a11176d1 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py
@@ -1,5 +1,7 @@
 def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x):
     a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function
-    __temp_7, = __compiled_fn_5(x, b)
-    return __temp_7
+    tmp_1 = x
+    tmp_2 = b
+    graph_out_0 = __compiled_fn_5(x, b)
+    return graph_out_0[0]
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py
index a1e40aaf..9536e462 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py
@@ -1,8 +1,14 @@
 def __transformed_code_1_for_forward(self, a, b):
-    __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils
-        .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b,
-        0), b)
-    x = __temp_11
-    if __temp_12:
+    tmp_1 = __import_torch_dot__dynamo_dot_utils
+    tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size
+    tmp_3 = a
+    __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0)
+    tmp_4 = __temp_5
+    tmp_5 = b
+    __temp_6 = tmp_2(b, 0)
+    tmp_6 = __temp_6
+    graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5)
+    x = graph_out_0[1]
+    if graph_out_0[0]:
         return __resume_at_30_8(b, x)
     return __resume_at_38_9(b, x)
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py
index 3deb3869..d1534085 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py
@@ -1,6 +1,11 @@
 def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x):
     a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function
-    __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils.
-        call_size(b, 0), b, x)
-    return __temp_17
+    tmp_1 = __import_torch_dot__dynamo_dot_utils
+    tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size
+    tmp_3 = b
+    __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0)
+    tmp_4 = __temp_10
+    tmp_5 = x
+    graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x)
+    return graph_out_0[0]
diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py
index 76a72386..2e6f07fd 100644
--- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py
+++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py
@@ -2,67 +2,34 @@
 # Note: the following variables are used inside the guard function.
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101f400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1697bd3f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11127c040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x169ae6170>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. 
at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101f400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1697bd3f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11127c040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x169ae6170>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. 
at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1111c3760>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x169763f40>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386548656) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4390051152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4399168256) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4570743808) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11101c700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x169763d90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x110f537f0>''' +___dict_contains = '''. 
at 0x169763b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4386548656) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4390051152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4399168256) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4570743808) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index c1c11733..7428d93d 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11161b400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1475c11b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111878040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1478ea170>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. 
at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11161b400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1475c11b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111878040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1478ea170>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. 
at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1117c3760>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14787ac20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391561920) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395046912) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326521360) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4328047056) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x111618700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x147563d90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1114537f0>''' +___dict_contains = '''. 
at 0x147563b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391561920) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4395046912) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326521360) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4328047056) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 93736ec8..701ab84b 100644 --- a/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12161b400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14ecbd1b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
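Each __guard_* function in these files is ordinary Python that Dynamo evaluates before reusing a cached compilation: it AND-s together checks on ambient state, the torch-function mode stack, and the metadata of every captured tensor, and returns a single boolean. A simplified analogue of the tensor checks for the static entry above (size=[10], stride=[1], float32, CPU), written with plain tensor methods instead of the real check_tensor/check_no_aliasing helpers:

import torch

def simplified_guard(b, x):
    # Rough analogue of __guard_0_for_torch_dynamo_resume_in_forward_at_15:
    # any failing condition makes the guard miss and forces a recompile.
    guard_hit = True
    guard_hit = guard_hit and b.dtype == torch.float32 and b.device.type == "cpu"
    guard_hit = guard_hit and list(b.size()) == [10] and list(b.stride()) == [1]
    guard_hit = guard_hit and not b.requires_grad
    guard_hit = guard_hit and x.dtype == torch.float32 and x.device.type == "cpu"
    guard_hit = guard_hit and list(x.size()) == [10] and list(x.stride()) == [1]
    guard_hit = guard_hit and b.data_ptr() != x.data_ptr()  # crude no-aliasing stand-in
    return guard_hit

print(simplified_guard(torch.randn(10), torch.randn(10)))   # True
print(simplified_guard(torch.randn(12), torch.randn(12)))   # False -> would recompile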
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121874040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14eeea050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. 
at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
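The relational guards in the dynamic entry above (2 <= L['b'].size()[0], L['x'].size()[0] == L['b'].size()[0]) come from 0/1 specialization: a symbolic dimension is assumed to be at least 2 unless the tensor is marked otherwise, and the guard text itself points at torch._dynamo.mark_unbacked(tensor, dim) as the way to drop that assumption. The related torch._dynamo.mark_dynamic(tensor, dim) asks for a symbolic size up front, so the first entry is not specialized to an exact length. A hedged sketch, with the module body reconstructed from the "# File: ... test_pytorch.py:14-17 in forward" comments in the graphs (an approximation of the test module, not its exact source):

import torch

class ToyModule(torch.nn.Module):
    # Approximation of the module exercised by these test outputs,
    # reconstructed from the source-line comments in the diffs.
    def forward(self, a, b):
        x = a / (torch.abs(a) + 1)
        if b.sum() < 0:
            b = b * -1
        return x * b

mod = torch.compile(ToyModule(), backend="aot_eager")
a, b = torch.randn(10), torch.randn(10)

# Marking dim 0 dynamic avoids baking size == 10 into the first entry;
# the generated guards then keep only relational checks like the ones above.
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 0)
print(mod(a, b).shape)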
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12161b400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14ecbd1b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
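This file keeps two compiled entries for the resume function: __guard_0_* checks the exact shape recorded on the first call (size=[10]), while __guard_1_* is the dynamic recompile with symbolic sizes (size=[None]) and the relational guards shown earlier. How the second entry typically comes about, assuming the default automatic dynamic-shape behavior, can be seen with a hypothetical counting backend:

import torch

compile_count = 0

def counting_backend(gm, example_inputs):
    # Minimal custom backend: count graph compilations, then run eagerly.
    global compile_count
    compile_count += 1
    return gm.forward

@torch.compile(backend=counting_backend)
def f(x, b):
    return x * b

f(torch.randn(10), torch.randn(10))   # first entry, specialized to size 10
f(torch.randn(12), torch.randn(12))   # guard miss -> second, dynamic entry
f(torch.randn(17), torch.randn(17))   # reuses the dynamic entry
print(compile_count)                  # expected 2 under automatic dynamic shapes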
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121874040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14eeea050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. 
at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
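The transformed___resume_at_* wrappers named in the hunk headers tie these pieces together: on each call Dynamo walks the cached entries for the frame, evaluates the matching __guard_* function, and runs the first __transformed_code_* whose guard passes; when every guard misses it compiles a new entry, which is why the notes say a given transformed version might well not be executed directly. A conceptual sketch of that dispatch, with hypothetical entry tuples rather than the real per-code-object cache:

def dispatch(entries, fallback_fn, *args):
    # Conceptual only: the real dispatch happens inside Dynamo's eval-frame
    # hook, and a miss on every guard normally triggers a recompile that
    # appends a new (guard, transformed_code) entry instead of this fallback.
    for guard_fn, transformed_fn in entries:
        if guard_fn(*args):
            return transformed_fn(*args)
    return fallback_fn(*args)

# Hypothetical usage mirroring the two entries above:
# entries = [(guard_0, transformed_code_0), (guard_1, transformed_code_1)]
# result = dispatch(entries, original_resume_fn, b, x)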
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121618ca0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14ee7a9e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325763776) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4384249696) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364073648) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4365599184) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121618700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14ec5fd90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1214537f0>''' +___dict_contains = '''. 
at 0x14ec5fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325763776) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4384249696) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364073648) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365599184) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
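Besides tensor checks, __guard_0_for_forward pins the identity of globals that the traced code read: ___check_obj_id(G['torch'], ...) and ___check_obj_id(G['torch'].abs, ...) compare raw id() values captured during tracing, so rebinding torch.abs (or swapping out the module) invalidates the entry. The literal numbers are process-specific addresses, which is why this test refresh rewrites them wholesale. A rough Python equivalent of the check (the real helper is a compiled builtin, not this function):

import torch

def check_obj_id(obj, expected_id):
    # Rough stand-in for ___check_obj_id: a pure identity comparison.
    return id(obj) == expected_id

torch_id = id(torch)                     # ids captured at "compile" time
abs_id = id(torch.abs)
orig_abs = torch.abs

print(check_obj_id(torch, torch_id))     # True while nothing is rebound
torch.abs = lambda t: orig_abs(t)        # monkey-patching rebinds the global...
print(check_obj_id(torch.abs, abs_id))   # ...so the identity guard now fails
torch.abs = orig_abs                     # restore the original binding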
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index 4f1b2ac2..4c46eab4 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- 
a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 94842116..f26b0341 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = 
torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 84ed7d99..0c58023a 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, 
add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 84ed7d99..0c58023a 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sgn: "f32[10]" = torch.ops.aten.sgn.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sgn); mul = sgn = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py 
b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index caa124fb..28960e9f 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index f0055de7..8171524f 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" 
= torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: 
"f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git 
a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py index e4e9efa6..405099d4 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f2a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16e63d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f1db130>''' +___dict_contains = '''. 
at 0x16e38b520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f2631c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16e38b2e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f1db130>''' +___dict_contains = '''. at 0x16e38b520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364233408) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4369569792) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4365236752) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366746384) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index 84103380..f7a33373 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1507a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30d73d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1506db130>''' +___dict_contains = '''. 
at 0x30d667520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1507671c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x30d6672e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1506db130>''' +___dict_contains = '''. at 0x30d667520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397870080) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4585577488) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389714608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391240784) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 5e329d01..5ba384b3 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a7a2b90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16e93d510>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a6db130>''' +___dict_contains = '''. 
at 0x16e633520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a7631c0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16e6332e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x14a6db130>''' +___dict_contains = '''. at 0x16e633520>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4391267008) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4428289888) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4365597120) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366909984) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py index b9545e37..5ca99091 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + 
def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py index b9545e37..5ca99091 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = 
torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py index b036c79d..95da859f 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py index b036c79d..95da859f 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- 
a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index d7715145..01ea87fc 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117a1b400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x3022bd3f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. 
at 0x1179537f0>''' +___dict_contains = '''. at 0x302263b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117a18700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x302263d90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1179537f0>''' +___dict_contains = '''. at 0x302263b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332367008) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335868944) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4407327088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4579132096) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index bdf1cf2a..46ca5df3 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b21f400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1468bd1b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11b1537f0>''' +___dict_contains = '''. 
at 0x146863b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11b21c700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x146863d90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11b1537f0>''' +___dict_contains = '''. at 0x146863b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354763456) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4432484192) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4316936960) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4318659904) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index ef789c96..2e5da2c0 100644 --- a/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_aot_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e717400>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fbb91b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e6537f0>''' +___dict_contains = '''. 
at 0x14fb63b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e714700>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fb63d90>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e6537f0>''' +___dict_contains = '''. at 0x14fb63b50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389432000) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4468135936) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4321409552) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365222432) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: 
"f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py index 67c99a8b..14896cd1 100644 --- 
a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); 
l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index a1e40aaf..9536e462 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 3deb3869..d1534085 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ 
b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py index ec3a2bf3..bb1f8b22 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c95f30>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f398700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. 
at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f64b490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c95f30>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f398700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. 
at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed2f80>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f64b490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122ed0c10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f5ffd00>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. 
at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364249952) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4575091408) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355226288) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356752464) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x122c94790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f39be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x122c02b90>''' +___dict_contains = '''. at 0x16f39bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364249952) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4575091408) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355226288) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4356752464) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py index d3e39f25..505bff95 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120195cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149b9ae60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. 
at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d2dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149e43490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120195cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149b9ae60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. 
at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d2dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149e43490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1204d0ee0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149e40790>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. 
at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332448448) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335933440) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329814704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4331340880) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x120194550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x149b9be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x120106950>''' +___dict_contains = '''. at 0x149b9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332448448) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335933440) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4329814704) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4331340880) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py index 4968f42e..5f7096e2 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b295cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a198700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. 
at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4ab0a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a547490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b295cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a198700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. 
at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4ab0a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a547490>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b4a8ee0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a19add0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. 
at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388055744) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391540576) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356160176) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4357686352) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12b294550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a19be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12b20a950>''' +___dict_contains = '''. at 0x16a19bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4388055744) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391540576) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356160176) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4357686352) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def 
forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch 
+from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py 
b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = 
None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py index a1e40aaf..9536e462 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 3deb3869..d1534085 100644 --- 
a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py index 561cac35..4c8fd929 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d395e10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b5f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. 
at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d1f5360>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b8470a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d395e10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b5f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. 
at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d1f5360>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b8470a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d69beb0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b5f4790>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. 
at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358761632) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412562608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4365056768) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366582944) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d394790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14b59be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x12d306b90>''' +___dict_contains = '''. at 0x14b59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4358761632) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412562608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4365056768) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366582944) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 25bebcd6..d9bca22d 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14f8f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. 
at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9bd90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fb430a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14f8f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. 
at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9bd90>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fb430a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112f9be20>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14fb40af0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. 
at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393380544) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4415707136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322506880) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323820384) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x112c94280>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14f89be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x112ac8280>''' +___dict_contains = '''. at 0x14f89bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4393380544) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4415707136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322506880) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4323820384) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py index e9b0e2fc..01fb1a29 100644 --- a/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118695e10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x176df6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. 
at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11872b370>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x177842ef0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118695e10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x176df6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. 
at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11872b370>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x177842ef0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11889beb0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x176d9ae60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. 
at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325026496) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4403124224) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364122800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366091184) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x118694790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x176d9be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x118602b90>''' +___dict_contains = '''. at 0x176d9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4325026496) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4403124224) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364122800) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4366091184) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def 
forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index 
f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py index 3df1ba23..f9e22510 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117b95f30>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150f98700>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x117b02b90>''' +___dict_contains = '''. 
at 0x150f9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117b94790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x150f9be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x117b02b90>''' +___dict_contains = '''. at 0x150f9bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364970688) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4423047008) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364367760) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365877712) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py index e2f169c9..1e831e85 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121395cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15819ae60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x121306950>''' +___dict_contains = '''. 
at 0x15819bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x121394550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15819be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x121306950>''' +___dict_contains = '''. at 0x15819bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353452736) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4379006976) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324128976) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4325638528) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
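Throughout the full_code_for_forward_* fixtures in this patch, the regenerated __transformed_code_0_for_forward (next hunk, and repeated across the test variants) stops unpacking the compiled graph's result into __temp_* locals and instead binds the whole tuple as graph_out_0 and indexes it, matching the new (lt, x) output order of the captured graph. A minimal runnable sketch of that control-flow shape, in which the stub graph function and the two branch bodies are hypothetical placeholders rather than code from this patch:

import torch

def compiled_fn_1_stub(a, b):
    # Hypothetical stand-in for __compiled_fn_1: mirrors the new output order,
    # returning the branch condition first and the computed tensor second.
    x = a / (torch.abs(a) + 1)
    return (b.sum() < 0, x)

def transformed_forward(a, b):
    # Shape of the regenerated fixture: bind the whole tuple, then index it.
    graph_out_0 = compiled_fn_1_stub(a, b)
    x = graph_out_0[1]
    if graph_out_0[0]:
        return x * -b   # placeholder for the __resume_at_30_2 path
    return x * b        # placeholder for the __resume_at_38_3 path

a, b = torch.randn(10), torch.randn(10)
expected = (a / (torch.abs(a) + 1)) * (-b if b.sum() < 0 else b)
assert torch.allclose(transformed_forward(a, b), expected)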
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py index 5920d6ca..876eceaa 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115d95cf0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14239ae60>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115d0a950>''' +___dict_contains = '''. 
at 0x14239bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x115d94550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14239be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x115d0a950>''' +___dict_contains = '''. at 0x14239bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4318472896) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4321957888) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4387207936) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4388537184) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
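In the __guard_0_for_forward hunk above, the substantive changes are the removal of a duplicated check_no_aliasing line, refreshed constants in the two ___check_obj_id checks on G['torch'] and G['torch'].abs, and the source-location comment moving from _dynamo/output_graph.py:483 to :551. The large integers are, in effect, CPython id() values recorded when the fixture was generated, which is why they differ on every regeneration. A rough, illustrative analogue of that kind of identity guard follows; the helper name and structure are assumptions for the sketch, not the actual torch._dynamo API:

import torch

def check_obj_id(obj, expected_id):
    # Illustrative identity guard: passes only while `obj` is the exact same
    # object whose id() was recorded at compile time.
    return id(obj) == expected_id

recorded_torch_id = id(torch)      # analogous to the ten-digit constants above
recorded_abs_id = id(torch.abs)

def guard_0_for_forward_sketch():
    return (check_obj_id(torch, recorded_torch_id)
            and check_obj_id(torch.abs, recorded_abs_id))

# Holds within this process; a fresh process records new ids and would recompile.
assert guard_0_for_forward_sketch()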
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py 
b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py index 82ee3d8d..4e55ccfc 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124a98550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1667f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1249c4280>''' +___dict_contains = '''. at 0x16679bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
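The resume-function guard above keeps its check_tensor lines unchanged apart from dropping a duplicated check_no_aliasing; those guards pin the dtype, device, requires_grad flag, size, and stride of L['b'] and L['x']. A simplified stand-in for that kind of metadata guard is sketched below; the real helper lives in torch._C._dynamo.guards and also checks dispatch keys and aliasing, so the function here is an illustrative assumption, not the actual signature:

import torch

def tensor_guard(t, *, dtype, requires_grad, size, stride):
    # Simplified metadata guard in the spirit of check_tensor(...): the compiled
    # code is reused only while the recorded tensor properties still match.
    return (
        isinstance(t, torch.Tensor)
        and t.dtype == dtype
        and t.requires_grad == requires_grad
        and list(t.size()) == size
        and list(t.stride()) == stride
    )

b = torch.randn(10)   # requires_grad=False, matching this "without_grad" variant
assert tensor_guard(b, dtype=torch.float32, requires_grad=False, size=[10], stride=[1])
assert not tensor_guard(b.double(), dtype=torch.float32, requires_grad=False, size=[10], stride=[1])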
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x124a98280>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16679be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1249c4280>''' +___dict_contains = '''. 
at 0x16679bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4332088000) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4335572992) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4368431152) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412408352) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py index 044aa93a..a0a5694f 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117f95e10>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c4f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x117f02b90>''' +___dict_contains = '''. at 0x16c49bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x117f94790>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c49be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x117f02b90>''' +___dict_contains = '''. 
at 0x16c49bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362693312) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4394735456) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4326357200) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391436992) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py index 59308f73..c9009212 100644 --- a/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_eager_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e895bd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15c5f6050>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e806950>''' +___dict_contains = '''. at 0x15c59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e894550>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15c59be20>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11e806950>''' +___dict_contains = '''. 
at 0x15c59bc70>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4328336064) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4376811360) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4396890640) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4398416576) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
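From this point on, the patch regenerates the graph fixtures themselves: each graph is now emitted as a GraphModule(torch.nn.Module) class preceded by explicit `import torch` and `from torch import device` lines, and the captured/forward graphs return (lt, x) rather than (x, lt). For orientation, the captured graph from these fixtures is reproduced below in runnable form with the `# File:` provenance comments trimmed; the two trailing usage lines are illustrative additions, not part of the fixture:

from __future__ import annotations
import torch
from torch import device

class GraphModule(torch.nn.Module):
    def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"):
        l_a_ = L_a_
        l_b_ = L_b_
        # x = a / (torch.abs(a) + 1)
        abs_1: "f32[10]" = torch.abs(l_a_)
        add: "f32[10]" = abs_1 + 1; abs_1 = None
        x: "f32[10]" = l_a_ / add; l_a_ = add = None
        # if b.sum() < 0:
        sum_1: "f32[]" = l_b_.sum(); l_b_ = None
        lt: "b8[]" = sum_1 < 0; sum_1 = None
        return (lt, x)

lt, x = GraphModule()(torch.randn(10), torch.randn(10))
assert lt.dtype == torch.bool and x.shape == (10,)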
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2ef4069c --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index a4ea69d4..289f0924 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: 
"f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 94842116..f26b0341 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - 
# File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 32406778..12e5174f 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, 
add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py index f84427fe..9cae4c04 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t 
x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -99,7 +115,7 @@ def call(args): cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, ) + return (buf2, buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py
@@ -1,18 +1,18 @@
 from __future__ import annotations
-
-
-
-def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"):
-    l_a_ = L_a_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[10]" = torch.abs(l_a_)
-    add: "f32[10]" = abs_1 + 1; abs_1 = None
-    x: "f32[10]" = l_a_ / add; l_a_ = add = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = l_b_.sum(); l_b_ = None
-    lt: "b8[]" = sum_1 < 0; sum_1 = None
-    return (x, lt)
-    
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"):
+        l_a_ = L_a_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
+        abs_1: "f32[10]" = torch.abs(l_a_)
+        add: "f32[10]" = abs_1 + 1; abs_1 = None
+        x: "f32[10]" = l_a_ / add; l_a_ = add = None
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
+        sum_1: "f32[]" = l_b_.sum(); l_b_ = None
+        lt: "b8[]" = sum_1 < 0; sum_1 = None
+        return (lt, x)
+    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py
index 32406778..12e5174f 100644
--- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py
@@ -1,30 +1,30 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals, tangents):
-    primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]";
-
-    primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1)
-    add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None
-    div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add)
+import torch
+from torch import device
+class joint_helper(torch.nn.Module):
+    def forward(self, primals, tangents):
+        primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]";
 
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0:
-    sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None
-    lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1)
-    neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1)
-    div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add)
-    div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None
-    mul: "f32[10]" =
torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2cfd53c1 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,15 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py deleted file mode 100644 index 731e3483..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Backward_graph.0.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return (None, mul_6, mul_5) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py deleted file mode 100644 index b19f551f..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Forward_graph.0.py +++ /dev/null @@ -1,12 
+0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul); mul = None - return (mul_2, primals_2, primals_3, primals_1) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py deleted file mode 100644 index aec22424..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.Joint_graph.0.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], self._out_spec) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py index 615dbd6f..40d5c848 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,32 +30,37 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' 
-#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -66,12 +73,12 @@ def call(args): primals_1, primals_2, primals_3 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_3, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(primals_3, primals_2, buf0, s0) - return (buf0, primals_2, primals_3, s0, ) + s52 = primals_1 + assert_size_stride(primals_2, (s52, ), (1, )) + assert_size_stride(primals_3, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + 
cpp_fused_mul_0(primals_3, primals_2, buf0, s52) + return (buf0, primals_2, primals_3, s52, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py index b0beb052..8b0822f6 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.kernel_1.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, const float* in_ptr2, @@ -37,33 +39,38 @@ const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - auto tmp7 = tmp0 * tmp6; - auto tmp8 = tmp7 * tmp3; - tmp5.store(out_ptr0 + static_cast(x0)); - tmp8.store(out_ptr1 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - auto tmp7 = tmp0 * tmp6; - auto tmp8 = tmp7 * tmp3; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0)); + tmp8.store(out_ptr1 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp6 = at::vec::Vectorized::loadu(in_ptr2 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + auto tmp7 = tmp0 * tmp6; + auto tmp8 = tmp7 * tmp3; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp8.store(out_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -76,13 +83,13 @@ def call(args): primals_1, primals_2, primals_3, tangents_1 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_3, (s0, ), (1, )) - assert_size_stride(tangents_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - buf1 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s0) + s52 = primals_1 + assert_size_stride(primals_2, (s52, ), (1, )) + assert_size_stride(primals_3, (s52, ), (1, )) + assert_size_stride(tangents_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + buf1 = empty_strided_cpu((s52, ), (1, ), torch.float32) + cpp_fused_mul_0(tangents_1, primals_2, primals_3, buf0, buf1, s52) del primals_2 
del primals_3 del tangents_1 diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py deleted file mode 100644 index aec22424..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "f32[s0]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_2, -1); primals_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(primals_3, mul) - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, primals_3); primals_3 = None - mul_5: "f32[s0]" = torch.ops.aten.mul.Tensor(tangents_1, mul); tangents_1 = mul = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul_6: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_4, -1); mul_4 = None - return pytree.tree_unflatten([mul_2, None, mul_6, mul_5], 
self._out_spec)
-    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py
new file mode 100644
index 00000000..3765b6ed
--- /dev/null
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
+        l_x_ = L_x_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
+        return (mul,)
+    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
index caa124fb..28960e9f 100644
--- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py
@@ -1,10 +1,10 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
-    mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
-    return (mul_2, mul_1)
-    
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
+        mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
+        return (mul_2, mul_1)
+    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
index 092bb929..3765b6ed 100644
--- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py
@@ -1,12 +1,12 @@
 from __future__ import annotations
-
-
-
-def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
-    l_x_ = L_x_
-    l_b_ = L_b_
-
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
-    return (mul,)
-    
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"):
+        l_x_ = L_x_
+        l_b_ = L_b_
+
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None
+        return (mul,)
+    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
index f0055de7..8171524f 100644
--- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"):
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
-    return (mul, primals_1, primals_2)
-    
\ No newline at end of file
+import torch
+from torch import device
+class GraphModule(torch.nn.Module):
+    def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"):
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
+        return (mul, primals_1, primals_2)
+    
\ No newline at end of file
diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py
index 055b4058..2a8efc5b 100644
--- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py
+++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py
@@ -1,14 +1,14 @@
 from __future__ import annotations
-
-
-
-def forward(self, primals, tangents):
-    primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]";
-
-    primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
-    # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
-    mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
-    mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
-    mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
-    return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec)
-    
\ No newline at end of file
+import torch
+from torch import device
+class joint_helper(torch.nn.Module):
+    def forward(self, primals, tangents):
+        primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]";
+
+        primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
+        # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b
+        mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2)
+        mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None
+        mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None
+        return pytree.tree_unflatten([mul, mul_2, mul_1],
self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py index a2081e47..e4f328d7 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..14896cd1 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py deleted file mode 100644 index 489e3e02..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Backward_graph.0.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", tangents_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return (None, add_7, None, None) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def 
forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py deleted file mode 100644 index ce83484b..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Forward_graph.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals_1: "Sym(s0)", primals_2: "f32[s0]", primals_3: "Sym(s1)", primals_4: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2); add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_2, primals_1) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py deleted file mode 100644 index ef6c1324..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.Joint_graph.0.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: 
"f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py index 643b9054..1418d119 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -41,46 +43,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + } + { + for(int64_t x0=static_cast(0LL); x0(ks1); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))) && x0 < static_cast(ks1))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } + } } } } @@ -93,17 +109,17 @@ def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() - s0 = primals_1 - s1 = primals_3 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(primals_4, (s1, ), (1, )) + s97 = primals_1 + s52 = primals_3 + assert_size_stride(primals_2, (s97, ), (1, )) + assert_size_stride(primals_4, (s52, ), (1, )) buf1 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s1, s0) + buf0 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(primals_4, primals_2, buf1, buf2, buf0, s52, s97) del buf1 del primals_4 - return (buf0, buf2, primals_2, s0, ) + return (buf2, buf0, primals_2, s97, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py index 4df30593..06b0ce7f 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.kernel_1.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = 
torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,64 +30,69 @@ cpp_fused_abs_add_div_mul_neg_sgn_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp1.abs(); - auto tmp3 = static_cast(1.0); - auto tmp4 = at::vec::Vectorized(tmp3); - auto tmp5 = tmp2 + tmp4; - auto tmp6 = tmp0 / tmp5; - auto tmp7 = tmp0.neg(); - auto tmp8 = tmp1 / tmp5; - auto tmp9 = tmp8 / tmp5; - auto tmp10 = tmp7 * tmp9; - auto tmp11 = - [&]() { - auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); - auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); - return left - right; + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp1.abs(); + auto tmp3 = static_cast(1.0); + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = + [&]() + { + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); + return left - right; + } + () + ; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = tmp1.abs(); + auto tmp3 = static_cast(1.0); + auto tmp4 = at::vec::Vectorized(tmp3); + auto tmp5 = tmp2 + tmp4; + auto tmp6 = tmp0 / tmp5; + auto tmp7 = tmp0.neg(); + auto tmp8 = tmp1 / tmp5; + auto tmp9 = tmp8 / tmp5; + auto tmp10 = tmp7 * tmp9; + auto tmp11 = + [&]() + { + auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); + auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); + return left - right; + } + () + ; + auto tmp12 = tmp10 * tmp11; + auto tmp13 = tmp6 + tmp12; + tmp13.store(out_ptr0 + static_cast(x0), 
static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } } - () - ; - auto tmp12 = tmp10 * tmp11; - auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = tmp1.abs(); - auto tmp3 = static_cast(1.0); - auto tmp4 = at::vec::Vectorized(tmp3); - auto tmp5 = tmp2 + tmp4; - auto tmp6 = tmp0 / tmp5; - auto tmp7 = tmp0.neg(); - auto tmp8 = tmp1 / tmp5; - auto tmp9 = tmp8 / tmp5; - auto tmp10 = tmp7 * tmp9; - auto tmp11 = - [&]() - { - auto left = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), decltype(tmp1)(0) < tmp1); - auto right = decltype(tmp1)::blendv(decltype(tmp1)(0), decltype(tmp1)(1), tmp1 < decltype(tmp1)(0)); - return left - right; - } - () - ; - auto tmp12 = tmp10 * tmp11; - auto tmp13 = tmp6 + tmp12; - tmp13.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); } } } @@ -98,11 +105,11 @@ def call(args): primals_1, primals_2, tangents_1 = args args.clear() - s0 = primals_1 - assert_size_stride(primals_2, (s0, ), (1, )) - assert_size_stride(tangents_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s0) + s97 = primals_1 + assert_size_stride(primals_2, (s97, ), (1, )) + assert_size_stride(tangents_1, (s97, ), (1, )) + buf0 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_mul_neg_sgn_0(tangents_1, primals_2, buf0, s97) del primals_2 del tangents_1 return (None, buf0, None, None, ) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import 
device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py deleted file mode 100644 index ef6c1324..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "Sym(s0)"; primals_2: "f32[s0]"; primals_3: "Sym(s1)"; primals_4: "f32[s1]"; tangents_1: "f32[s0]"; - - primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(primals_2) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_4); primals_4 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[s0]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[s0]" = torch.ops.aten.div.Tensor(primals_2, add_2) - div_2: "f32[s0]" = torch.ops.aten.div.Tensor(div_1, add_2); div_1 = None - mul_3: "f32[s0]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[s0]" = torch.ops.aten.div.Tensor(tangents_1, add_2); tangents_1 = add_2 = None - sign: "f32[s0]" = torch.ops.aten.sign.default(primals_2); primals_2 = None - mul_4: "f32[s0]" = torch.ops.aten.mul.Tensor(mul_3, sign); mul_3 = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_7: "f32[s0]" = torch.ops.aten.add.Tensor(div_3, mul_4); div_3 = mul_4 = None - return pytree.tree_unflatten([div, lt, None, add_7, None, None], self._out_spec) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def 
__transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py index a1e40aaf..9536e462 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_forward.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 3deb3869..d1534085 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. 
- call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py index bd7e9171..238d1072 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639ba0e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13c4224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. 
at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1640000d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f4c01f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639ba0e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13c4224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. 
at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1640000d0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f4c01f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1639b81f0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f44d630>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. 
at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358363136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361468592) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4435477264) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16390d990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13dcff5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x161f87880>''' +___dict_contains = '''. at 0x13dcffbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4354878144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358363136) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4361468592) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4435477264) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py index afcffc21..445b975a 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168f224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. 
at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ef70040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f1c01f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x168f224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. 
at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11ef70040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f1c01f0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9b4310>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16f149630>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. 
at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362792096) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439825424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356700928) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358030336) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e911990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16eb1b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d76f880>''' +___dict_contains = '''. at 0x16eb1bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4362792096) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4439825424) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356700928) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4358030336) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py index a00f2924..a8d3826d 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17aa264d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. 
at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12dbc0040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17a9af6d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17aa264d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. 
at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12dbc0040>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17a9af6d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # 
data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11feb4310>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17ec92290>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. 
at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4366363328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434581344) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363483584) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4364797248) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside 
the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fe0d990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x17e75f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11db73880>''' +___dict_contains = '''. at 0x17e75fbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4366363328) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4434581344) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4363483584) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4364797248) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2ef4069c --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py deleted file mode 100644 index b9545e37..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py index 466582ab..d0b9eff2 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < 
static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -93,13 +109,13 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) + buf1 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf0, buf2, buf1) del arg0_1 del arg1_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import 
annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py deleted file mode 100644 index b9545e37..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2cfd53c1 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,15 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return 
(mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py index 97dd87fe..2cfd53c1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Captured_Graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py deleted file mode 100644 index 2f386a3c..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.Forward_graph.0.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py index 4447a169..7d81efa0 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ 
inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,32 +30,37 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, const int64_t ks0) { { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - auto tmp2 = static_cast(-1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 * tmp3; - auto tmp5 = tmp0 * tmp4; - tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + auto tmp2 = static_cast(-1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 * tmp3; + auto tmp5 = tmp0 * tmp4; + tmp5.store(out_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } } } @@ -66,11 +73,11 @@ 
def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() - s0 = arg0_1 - assert_size_stride(arg1_1, (s0, ), (1, )) - assert_size_stride(arg2_1, (s0, ), (1, )) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_mul_0(arg2_1, arg1_1, buf0, s0) + s52 = arg0_1 + assert_size_stride(arg1_1, (s52, ), (1, )) + assert_size_stride(arg2_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((s52, ), (1, ), torch.float32) + cpp_fused_mul_0(arg2_1, arg1_1, buf0, s52) del arg1_1 del arg2_1 return (buf0, ) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py index 78cad323..87ae2123 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.pre_insert_deferred_runtime_asserts___compiled_fn_11.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_b_: "f32[s0]", s1: "Sym(s0)", L_x_: "f32[s0]"): - l_b_ = L_b_ - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - b: "f32[s0]" = l_b_ * -1; l_b_ = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[s0]" = l_x_ * b; l_x_ = b = None - return (mul_1,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s52: "Sym(s52)", L_b_: "f32[s52]", s77: "Sym(s52)", L_x_: "f32[s52]"): + l_b_ = L_b_ + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 + b: "f32[s52]" = l_b_ * -1; l_b_ = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[s52]" = l_x_ * b; l_x_ = b = None + return (mul_1,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py deleted file mode 100644 index 2f386a3c..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_11.tensorify_python_scalars.0.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "f32[s0]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15, code: b = b * -1 - mul: "f32[s0]" = torch.ops.aten.mul.Tensor(arg1_1, -1); arg1_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_2: "f32[s0]" = torch.ops.aten.mul.Tensor(arg2_1, mul); arg2_1 = mul = None - return (mul_2,) - \ No newline at end of file diff --git 
a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..3765b6ed --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py deleted file mode 100644 index b036c79d..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py index a17b3a1d..35feb7e1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import 
run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", 
L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py deleted file mode 100644 index b036c79d..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..14896cd1 --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class 
GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py deleted file mode 100644 index 722d8681..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.Forward_graph.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py index 326bb816..f21bf5a2 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*', 'const int64_t', 'const int64_t'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, 
float* out_ptr0, @@ -41,46 +43,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(ks0); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))); x0(ks0); x0+=(static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))) == 0 ? 1 : static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL))))))) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))) && x0 < static_cast(ks0))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(ks0 + ((-4LL)*(c10::div_floor_integer(static_cast(ks0), static_cast(4LL)))))); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))); x0(ks1); x0+=(static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))) == 0 ? 
1 : static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL))))))) + } + { + for(int64_t x0=static_cast(0LL); x0(ks1); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(4LL*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))) && x0 < static_cast(ks1))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(ks1 + ((-4LL)*(c10::div_floor_integer(static_cast(ks1), static_cast(4LL)))))); + } + } } } } @@ -93,17 +109,17 @@ def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() - s0 = arg0_1 - s1 = arg2_1 - assert_size_stride(arg1_1, (s0, ), (1, )) - assert_size_stride(arg3_1, (s1, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + s97 = arg0_1 + s52 = arg2_1 + assert_size_stride(arg1_1, (s97, ), (1, )) + assert_size_stride(arg3_1, (s52, ), (1, )) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((s0, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf1, buf2, buf0, s1, s0) + buf1 = empty_strided_cpu((s97, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg3_1, arg1_1, buf0, buf2, buf1, s52, s97) del arg1_1 del arg3_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py index 67c99a8b..14896cd1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.pre_insert_deferred_runtime_asserts___compiled_fn_7.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, s0: "Sym(s0)", L_a_: "f32[s0]", s1: "Sym(s1)", L_b_: "f32[s1]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.abs(l_a_) - add: "f32[s0]" = abs_1 + 1; abs_1 = None - x: "f32[s0]" = l_a_ / add; l_a_ = add = 
None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, s97: "Sym(s97)", L_a_: "f32[s97]", s52: "Sym(s52)", L_b_: "f32[s52]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[s97]" = torch.abs(l_a_) + add: "f32[s97]" = abs_1 + 1; abs_1 = None + x: "f32[s97]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py deleted file mode 100644 index 722d8681..00000000 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__compiled_fn_7.tensorify_python_scalars.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "Sym(s0)", arg1_1: "f32[s0]", arg2_1: "Sym(s1)", arg3_1: "f32[s1]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[s0]" = torch.ops.aten.abs.default(arg1_1) - add_2: "f32[s0]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[s0]" = torch.ops.aten.div.Tensor(arg1_1, add_2); arg1_1 = add_2 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg3_1); arg3_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ 
b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py index a1e40aaf..9536e462 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_forward.py @@ -1,8 +1,14 @@ def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py index 3deb3869..d1534085 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/__transformed_code_1_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,6 +1,11 @@ def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py index b56f3b2c..3120724e 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db26dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c09f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1ad750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16b7156c0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. 
at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db26dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c09f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1ad750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16b7156c0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. 
at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11f1439a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16b5669e0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353108672) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412561408) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4394973392) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4396286416) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11db25240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16bf9e560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d5beef0>''' +___dict_contains = '''. 
at 0x16c09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353108672) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4412561408) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4394973392) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4396286416) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py index 3b11680e..f222a679 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42edd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1539af5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
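# Note (illustrative sketch, not generated output): the __guard_* functions in the
# hunks above chain boolean checks over the frame's locals L and globals G and
# return whether the cached compiled code may be reused. The real helpers
# (check_tensor, check_no_aliasing, ___check_obj_id, ...) come from
# torch._C._dynamo.guards; the toy predicate below only mimics the pattern with
# plain Python checks.
import torch

def toy_guard_for_forward(L, G):
    guard_hit = True
    # rough analogue of check_tensor(L['a'], ..., torch.float32, size=[10], stride=[1])
    guard_hit = guard_hit and isinstance(L['a'], torch.Tensor)
    guard_hit = guard_hit and L['a'].dtype == torch.float32 and L['a'].size() == (10,)
    # rough analogue of check_no_aliasing(L['a'], L['b'])
    guard_hit = guard_hit and L['a'].data_ptr() != L['b'].data_ptr()
    # rough analogue of ___check_obj_id(G['torch'], <id>)
    guard_hit = guard_hit and G['torch'] is torch
    return guard_hit

assert toy_guard_for_forward({'a': torch.randn(10), 'b': torch.randn(10)}, {'torch': torch})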
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e965750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15fda79a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. 
at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
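# Note (illustrative sketch, not generated output): under dynamic shapes the
# decompiled caller above passes call_size(b, 0) as an explicit first argument to
# __compiled_fn_11. The toy analogue below assumes the graph computes
# "b = b * -1; return x * b" (the user code quoted in the guard comments) and
# shows why the leading size argument matches b.size(0).
import torch

def toy_compiled_fn_11(s0, b, x):
    # s0 stands in for the symbolic size of dim 0, passed alongside the tensors
    assert b.size(0) == s0 and x.size(0) == s0
    b = b * -1
    return (x * b,)

b, x = torch.randn(12), torch.randn(12)
graph_out_0 = toy_compiled_fn_11(b.size(0), b, x)   # call_size(b, 0) behaves like b.size(0)
print(graph_out_0[0].shape)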
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42edd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1539af5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e965750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15fda79a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. 
at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
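# Note (illustrative sketch, not generated output): the 0/1-specialization guard
# comments above suggest torch._dynamo.mark_unbacked(tensor, dim). The snippet
# below shows the related torch._dynamo.mark_dynamic hint on a toy function;
# exact behaviour and availability depend on the installed torch build.
import torch

def scale(a):
    return a / (torch.abs(a) + 1)

a = torch.randn(10)
torch._dynamo.mark_dynamic(a, 0)        # compile with a symbolic size for dim 0
# torch._dynamo.mark_unbacked(a, 0)     # stricter variant quoted in the guard comments
compiled_scale = torch.compile(scale)
print(compiled_scale(a).shape)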
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e9039a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15fc1e320>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355467968) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324932272) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4326261840) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11e42d240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1538ae560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11debeef0>''' +___dict_contains = '''. 
at 0x1539afb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4355467968) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4424095744) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4324932272) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4326261840) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py index 3b63da58..8f1202f8 100644 --- a/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_with_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a32dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15b09f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -105,67 +73,34 @@ def transformed___resume_at_38_3(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370b1750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15b6fd5a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. 
at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -173,8 +108,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -189,9 +123,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -212,67 +151,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a32dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15b09f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -280,7 +186,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -294,8 +199,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -315,67 +222,34 @@ def transformed___resume_at_38_9(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370b1750>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15b6fd5a0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. 
at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -383,8 +257,7 @@ def __guard_1_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) - __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:845 in infer_size) + __guard_hit = __guard_hit and L['x'].size()[0] == L['b'].size()[0] # return x * b # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15 (_subclasses/fake_impls.py:915 in infer_size) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # b = b * -1 # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:16 in torch_dynamo_resume_in_forward_at_15 (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -399,9 +272,14 @@ def __compiled_fn_11(*args, **kwargs): def __transformed_code_1_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_17, = __compiled_fn_11(__import_torch_dot__dynamo_dot_utils. - call_size(b, 0), b, x) - return __temp_17 + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = b + __temp_10 = __import_torch_dot__dynamo_dot_utils.call_size(b, 0) + tmp_4 = __temp_10 + tmp_5 = x + graph_out_0 = __compiled_fn_11(__temp_10, tmp_3, x) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. 
@@ -422,67 +300,34 @@ def transformed___resume_at_30_8(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1370479a0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15b6cfeb0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) @@ -490,9 +335,8 @@ def __guard_1_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[None], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350241472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373764096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322769024) + __guard_hit = __guard_hit and 
___check_obj_id(G['torch'].abs, 4324082128) __guard_hit = __guard_hit and 2 <= L['a'].size()[0] # x = a / (torch.abs(a) + 1) # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) __guard_hit = __guard_hit and 2 <= L['b'].size()[0] # if b.sum() < 0: # data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)) return __guard_hit @@ -507,11 +351,17 @@ def __compiled_fn_7(*args, **kwargs): pass def __transformed_code_1_for_forward(self, a, b): - __temp_11, __temp_12 = __compiled_fn_7(__import_torch_dot__dynamo_dot_utils - .call_size(a, 0), a, __import_torch_dot__dynamo_dot_utils.call_size(b, - 0), b) - x = __temp_11 - if __temp_12: + tmp_1 = __import_torch_dot__dynamo_dot_utils + tmp_2 = __import_torch_dot__dynamo_dot_utils.call_size + tmp_3 = a + __temp_5 = __import_torch_dot__dynamo_dot_utils.call_size(a, 0) + tmp_4 = __temp_5 + tmp_5 = b + __temp_6 = tmp_2(b, 0) + tmp_6 = __temp_6 + graph_out_0 = __compiled_fn_7(__temp_5, tmp_3, __temp_6, tmp_5) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_8(b, x) return __resume_at_38_9(b, x) @@ -519,67 +369,34 @@ def __transformed_code_1_for_forward(self, a, b): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x136a31240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15a79e560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1363baef0>''' +___dict_contains = '''. 
at 0x15b09fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -587,9 +404,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4350241472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4373764096) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322769024) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4324082128) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
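# Note (illustrative sketch, not generated output): the hunk below replaces tuple
# unpacking (__temp_2, __temp_3 = __compiled_fn_1(a, b)) with indexing into a
# single graph_out_0 tuple, matching the (lt, x) output order of the regenerated
# graphs. A hand-written stand-in, using the toy forward from test_pytorch.py
# that the guard comments quote:
import torch

def toy_compiled_fn_1(a, b):
    x = a / (torch.abs(a) + 1)
    lt = b.sum() < 0
    return (lt, x)                      # new convention: branch flag first, then x

def toy_transformed_forward(a, b):
    graph_out_0 = toy_compiled_fn_1(a, b)
    x = graph_out_0[1]
    if graph_out_0[0]:                  # taken branch ~ __resume_at_30_* in the diff
        b = b * -1
    return x * b                        # fall-through ~ __resume_at_38_* in the diff

print(toy_transformed_forward(torch.randn(10), torch.randn(10)).shape)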
@@ -602,9 +418,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2ef4069c --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py index a4ea69d4..289f0924 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Backward_graph.0.py @@ -1,20 +1,20 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return (add_1, None) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + abs_1: "f32[10]" = 
torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return (add_1, None) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py index 94842116..f26b0341 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Forward_graph.0.py @@ -1,15 +1,15 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: 
"f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt, primals_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add); add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + return (lt, div, primals_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py index 32406778..12e5174f 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.Joint_graph.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = 
torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py index f84427fe..9cae4c04 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = 
at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -99,7 +115,7 @@ def call(args): cpp_fused_abs_add_div_lt_sum_0(primals_2, primals_1, buf1, buf2, buf0) del buf1 del primals_2 - return (buf0, buf2, primals_1, ) + return (buf2, buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 
9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py index 32406778..12e5174f 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_1.tensorify_python_scalars.0.py @@ -1,30 +1,30 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) - div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) - div_2: "f32[10]" = 
torch.ops.aten.div.Tensor(div_1, add); div_1 = None - mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None - div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None - sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None - return pytree.tree_unflatten([div, lt, add_1, None], self._out_spec) - \ No newline at end of file + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.ops.aten.abs.default(primals_1) + add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None + div: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = torch.ops.aten.sum.default(primals_2); primals_2 = None + lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + neg: "f32[10]" = torch.ops.aten.neg.default(tangents_1) + div_1: "f32[10]" = torch.ops.aten.div.Tensor(primals_1, add) + div_2: "f32[10]" = torch.ops.aten.div.Tensor(div_1, add); div_1 = None + mul: "f32[10]" = torch.ops.aten.mul.Tensor(neg, div_2); neg = div_2 = None + div_3: "f32[10]" = torch.ops.aten.div.Tensor(tangents_1, add); tangents_1 = add = None + sign: "f32[10]" = torch.ops.aten.sign.default(primals_1); primals_1 = None + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(mul, sign); mul = sign = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + add_1: "f32[10]" = torch.ops.aten.add.Tensor(div_3, mul_1); div_3 = mul_1 = None + return pytree.tree_unflatten([lt, div, add_1, None], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..3765b6ed --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py index caa124fb..28960e9f 100644 --- 
a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Backward_graph.0.py @@ -1,10 +1,10 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return (mul_2, mul_1) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", tangents_1: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return (mul_2, mul_1) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py index f0055de7..8171524f 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Forward_graph.0.py @@ -1,9 +1,9 @@ from __future__ import annotations - - - -def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - return (mul, primals_1, primals_2) - \ No newline at end of file +import torch +from torch import device 
+class GraphModule(torch.nn.Module): + def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"): + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + return (mul, primals_1, primals_2) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.Joint_graph.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py index a2081e47..e4f328d7 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = 
torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py 
b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py index 055b4058..2a8efc5b 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__compiled_fn_5.tensorify_python_scalars.0.py @@ -1,14 +1,14 @@ from __future__ import annotations - - - -def forward(self, primals, tangents): - primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; - - primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) - mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None - mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None - return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) - \ No newline at end of file +import torch +from torch import device +class joint_helper(torch.nn.Module): + def forward(self, primals, tangents): + primals_1: "f32[10]"; primals_2: "f32[10]"; tangents_1: "f32[10]"; + + primals_1, primals_2, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec) + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = torch.ops.aten.mul.Tensor(primals_1, primals_2) + mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); primals_1 = None + mul_2: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, primals_2); tangents_1 = primals_2 = None + return pytree.tree_unflatten([mul, mul_2, mul_1], self._out_spec) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least 
the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py index bad5144e..9e01e07c 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d2b60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x15581e4d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d36f880>''' +___dict_contains = '''. 
at 0x157e8bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x12d215990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x157e8b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11d36f880>''' +___dict_contains = '''. at 0x157e8bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4359580352) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4417804288) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4364285840) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4365598784) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
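A rough illustration, not the actual torch._C._dynamo.guards implementation: the check_tensor guards above pin dtype, requires_grad, size and stride of each input before a cached compiled graph is reused. The helper below is a hypothetical plain-Python equivalent of what those recorded properties mean.

import torch

def tensor_matches(t, *, dtype, requires_grad, size, stride):
    # Mirrors the properties recorded in the check_tensor guards above.
    return (
        isinstance(t, torch.Tensor)
        and t.dtype == dtype
        and t.requires_grad == requires_grad
        and tuple(t.size()) == tuple(size)
        and tuple(t.stride()) == tuple(stride)
    )

a = torch.randn(10, requires_grad=True)
assert tensor_matches(a, dtype=torch.float32, requires_grad=True, size=[10], stride=[1])
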
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py index c9d22ebf..2fa5d869 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x142bb60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x13f1224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb6f880>''' +___dict_contains = '''. 
at 0x14a27bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x142b0d990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14a27b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11fb6f880>''' +___dict_contains = '''. at 0x14a27bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4383255472) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4461845440) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4353292336) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4354801728) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
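The ___check_obj_id values churn in every regenerated file because they are CPython object ids captured when the guard was built, so they are machine- and run-specific. Conceptually the guard is an identity check; the helper below is a hypothetical sketch, not the real guard implementation.

import torch

def check_obj_id(obj, expected_id):
    # Identity check: the cached graph is only reused if the global still refers
    # to the exact same object that was seen at compile time.
    return id(obj) == expected_id

recorded_torch_abs = id(torch.abs)  # captured when the guard was built
assert check_obj_id(torch.abs, recorded_torch_abs)
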
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py index a6d5b6a9..3cee3d8b 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_with_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1141b60e0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a5224d0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11326f880>''' +___dict_contains = '''. 
at 0x31511bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. @@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x114105990>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x31511b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11326f880>''' +___dict_contains = '''. at 0x31511bbe0>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=True, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4397673152) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4401158144) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389026320) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4390355488) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..2ef4069c --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,18 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Captured_Graph.0.py @@ -1,18 +1,18 @@ from __future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py 
b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py deleted file mode 100644 index b9545e37..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.Forward_graph.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py index 466582ab..d0b9eff2 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,7 +30,7 @@ cpp_fused_abs_add_div_lt_sum_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'bool*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0, @@ -39,46 +41,60 @@ { float tmp_acc0 = 0; at::vec::Vectorized tmp_acc0_vec = at::vec::Vectorized(0); - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - tmp_acc0_vec = tmp_acc0_vec + tmp0; - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + tmp_acc0_vec = tmp_acc0_vec + tmp0; + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) 
&& x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + tmp_acc0_vec = sum_masked_reduce(tmp_acc0_vec, tmp0, static_cast(2LL)); + } + } } tmp_acc0 = tmp_acc0 + at::vec::vec_reduce_all([](at::vec::Vectorized& x, at::vec::Vectorized& y) { return x + y; }, tmp_acc0_vec); out_ptr0[static_cast(0LL)] = static_cast(tmp_acc0); } } { - auto tmp0 = out_ptr0[static_cast(0LL)]; - auto tmp1 = static_cast(0.0); - auto tmp2 = tmp0 < tmp1; - out_ptr1[static_cast(0LL)] = tmp2; - } - { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0)); + { + auto tmp0 = out_ptr0[static_cast(0LL)]; + auto tmp1 = static_cast(0.0); + auto tmp2 = tmp0 < tmp1; + out_ptr1[static_cast(0LL)] = tmp2; + } } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) + } + { + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp1 = tmp0.abs(); - auto tmp2 = static_cast(1.0); - auto tmp3 = at::vec::Vectorized(tmp2); - auto tmp4 = tmp1 + tmp3; - auto tmp5 = tmp0 / tmp4; - tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp1 = tmp0.abs(); + auto tmp2 = static_cast(1.0); + auto tmp3 = at::vec::Vectorized(tmp2); + auto tmp4 = tmp1 + tmp3; + auto tmp5 = tmp0 / tmp4; + tmp5.store(out_ptr2 + static_cast(x0), static_cast(2LL)); + } + } } } } @@ -93,13 +109,13 @@ def call(args): args.clear() assert_size_stride(arg0_1, (10, ), (1, )) assert_size_stride(arg1_1, (10, ), (1, )) - buf1 = empty_strided_cpu((), (), torch.float32) + buf0 = empty_strided_cpu((), (), torch.float32) buf2 = empty_strided_cpu((), (), torch.bool) - buf0 = empty_strided_cpu((10, ), (1, ), torch.float32) - cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf1, buf2, buf0) + buf1 = empty_strided_cpu((10, ), (1, ), torch.float32) + cpp_fused_abs_add_div_lt_sum_0(arg1_1, arg0_1, buf0, buf2, buf1) del arg0_1 del arg1_1 - return (buf0, buf2, ) + return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 9e2b0f38..2ef4069c 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,18 +1,18 @@ from 
__future__ import annotations - - - -def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): - l_a_ = L_a_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.abs(l_a_) - add: "f32[10]" = abs_1 + 1; abs_1 = None - x: "f32[10]" = l_a_ / add; l_a_ = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = l_b_.sum(); l_b_ = None - lt: "b8[]" = sum_1 < 0; sum_1 = None - return (x, lt) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_a_: "f32[10]", L_b_: "f32[10]"): + l_a_ = L_a_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) + abs_1: "f32[10]" = torch.abs(l_a_) + add: "f32[10]" = abs_1 + 1; abs_1 = None + x: "f32[10]" = l_a_ / add; l_a_ = add = None + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: + sum_1: "f32[]" = l_b_.sum(); l_b_ = None + lt: "b8[]" = sum_1 < 0; sum_1 = None + return (lt, x) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py deleted file mode 100644 index b9545e37..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_1.tensorify_python_scalars.0.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:14 in forward, code: x = a / (torch.abs(a) + 1) - abs_1: "f32[10]" = torch.ops.aten.abs.default(arg0_1) - add: "f32[10]" = torch.ops.aten.add.Tensor(abs_1, 1); abs_1 = None - div: "f32[10]" = torch.ops.aten.div.Tensor(arg0_1, add); arg0_1 = add = None - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:15 in forward, code: if b.sum() < 0: - sum_1: "f32[]" = torch.ops.aten.sum.default(arg1_1); arg1_1 = None - lt: "b8[]" = torch.ops.aten.lt.Scalar(sum_1, 0); sum_1 = None - return (div, lt) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py new file mode 100644 index 00000000..3765b6ed --- /dev/null +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.BEFORE_PRE_GRAD.0.py @@ -0,0 +1,12 @@ +from __future__ import annotations +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py 
b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Captured_Graph.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py deleted file mode 100644 index b036c79d..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.Forward_graph.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py index a17b3a1d..35feb7e1 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.kernel_0.py @@ -6,6 +6,7 @@ import os import tempfile from math import inf, nan +from cmath import nanj from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align @@ -18,6 +19,7 @@ inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride +assert_alignment = torch._C._dynamo.guards.assert_alignment empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu @@ -28,25 +30,30 @@ cpp_fused_mul_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*'], ''' -#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/2r/c2rnilspx43ivnzu4uieul65kx65dfhfbptbh5og4wk6rqebuxoo.h" +#include "/var/folders/vm/ssf622nn02j77t14q1j8_88w0000gn/T/torchinductor_youkaichao/do/cdoggdcp7ux2jv5ebkajvacaprabp6b4h4m2o3zifjj6xwp2kz4n.h" extern "C" 
void kernel(const float* in_ptr0, const float* in_ptr1, float* out_ptr0) { { - for(int64_t x0=static_cast(0LL); x0(8LL); x0+=static_cast(4LL)) + for(int64_t x0=static_cast(0LL); x0(10LL); x0+=static_cast(4LL)) { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0)); - } - for(int64_t x0=static_cast(8LL); x0(10LL); x0+=static_cast(2LL)) - { - auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); - auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); - auto tmp2 = tmp0 * tmp1; - tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + { + if(C10_LIKELY(x0 >= static_cast(0) && x0 < static_cast(8LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(4)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(4)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0)); + } + if(C10_UNLIKELY(x0 >= static_cast(8LL) && x0 < static_cast(10LL))) + { + auto tmp0 = at::vec::Vectorized::loadu(in_ptr0 + static_cast(x0), static_cast(2LL)); + auto tmp1 = at::vec::Vectorized::loadu(in_ptr1 + static_cast(x0), static_cast(2LL)); + auto tmp2 = tmp0 * tmp1; + tmp2.store(out_ptr0 + static_cast(x0), static_cast(2LL)); + } + } } } } diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py index 092bb929..3765b6ed 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.pre_insert_deferred_runtime_asserts___compiled_fn_5.0.py @@ -1,12 +1,12 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): - l_x_ = L_x_ - l_b_ = L_b_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None - return (mul,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[10]", L_b_: "f32[10]"): + l_x_ = L_x_ + l_b_ = L_b_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b + mul: "f32[10]" = l_x_ * l_b_; l_x_ = l_b_ = None + return (mul,) + \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py deleted file mode 100644 index b036c79d..00000000 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__compiled_fn_5.tensorify_python_scalars.0.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import annotations - - - -def forward(self, arg0_1: "f32[10]", arg1_1: "f32[10]"): - # File: 
/Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_pytorch.py:17 in torch_dynamo_resume_in_forward_at_15, code: return x * b - mul: "f32[10]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None - return (mul,) - \ No newline at end of file diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py index f60cd0e1..f9f299a2 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_forward.py @@ -1,6 +1,8 @@ def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py index c167728a..a11176d1 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/__transformed_code_0_for_torch_dynamo_resume_in_forward_at_15.py @@ -1,5 +1,7 @@ def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py index a05d45aa..d11474dc 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133532dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16aa9f5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1327beef0>''' +___dict_contains = '''. at 0x16aa9fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x133531240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16a99e560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x1327beef0>''' +___dict_contains = '''. 
at 0x16aa9fb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4322356144) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4375862608) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4319673088) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4399826160) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py index b24c4798..fa9c1a23 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_1.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128d32dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14d89b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f7beef0>''' +___dict_contains = '''. at 0x14d89bb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x128d31240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x14d79a560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11f7beef0>''' +___dict_contains = '''. 
at 0x14d89bb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389006336) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4420951056) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4392007568) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4393320912) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. @@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py index b6f01413..1bd7c897 100644 --- a/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py +++ b/tests/depyf_output/debug_module_inductor_without_dynamic_shape_without_grad/full_code_for_forward_2.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11bc36dd0>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c19b5b0>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11adbaef0>''' +___dict_contains = '''. at 0x16c19bb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -70,7 +37,6 @@ def __guard_0_for_torch_dynamo_resume_in_forward_at_15(L, G, **___kwargs_ignored __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['x'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['b'], L['x']) return __guard_hit # Note: please refer to the graph code in __compiled_fn_5*.py. 
@@ -84,8 +50,10 @@ def __compiled_fn_5(*args, **kwargs): def __transformed_code_0_for_torch_dynamo_resume_in_forward_at_15(b, x): a = None; self = None # this line helps Python to generate bytecode with at least the same number of local variables as the original function - __temp_7, = __compiled_fn_5(x, b) - return __temp_7 + tmp_1 = x + tmp_2 = b + graph_out_0 = __compiled_fn_5(x, b) + return graph_out_0[0] # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible. @@ -118,67 +86,34 @@ def transformed___resume_at_30_2(b, x): # Note: the following variables are used inside the guard function. ___check_tensors = '''None''' ___check_tensors_verbose = '''None''' -___check_global_state = '''''' -___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11bc35240>''' -Abs = '''''' -Eq = '''''' -Ne = '''''' -Gt = '''''' -Lt = '''''' -Le = '''''' -Ge = '''''' -Min = '''''' -Max = '''''' -Mod = '''''' -PythonMod = '''''' -FloorDiv = '''''' -TrueDiv = '''''' -PowByNatural = '''''' -IsNonOverlappingAndDenseIndicator = '''''' -floor = '''''' -ceiling = '''''' -FloorToInt = '''''' -FloatPow = '''''' -CeilToInt = '''''' -cast_symbool_to_symint_guardless = '''''' -RoundToInt = '''''' -RoundDecimal = '''''' -TruncToInt = '''''' -IntTrueDiv = '''''' -FloatTrueDiv = '''''' -ToFloat = '''''' -OpaqueUnaryFn_cos = '''''' -OpaqueUnaryFn_cosh = '''''' -OpaqueUnaryFn_acos = '''''' -OpaqueUnaryFn_sin = '''''' -OpaqueUnaryFn_sinh = '''''' -OpaqueUnaryFn_asin = '''''' -OpaqueUnaryFn_tan = '''''' -OpaqueUnaryFn_tanh = '''''' -OpaqueUnaryFn_atan = '''''' -OpaqueUnaryFn_sqrt = '''''' +___check_global_state = '''''' +___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16c09a560>''' +IsNonOverlappingAndDenseIndicator = '''''' +cast_symbool_to_symint_guardless = '''''' +math = '''''' +torch = '''''' ___check_type_id = '''''' ___check_obj_id = '''''' ___odict_getitem = '''''' -___key_to_id = '''''' +___key_to_id = '''''' ___dict_version = '''''' -___dict_contains = '''. at 0x11adbaef0>''' +___dict_contains = '''. 
at 0x16c19bb50>''' ___tuple_iterator_len = '''''' -___tuple_iterator_getitem = '''''' -___get_torch_function_mode_stack_at = '''''' +___normalize_range_iter = '''''' +___tuple_iterator_getitem = '''''' +___get_torch_function_mode_stack_at = '''''' __math_isnan = '''''' __numpy_isnan = '''''' inf = '''inf''' -__load_module = '''''' -utils_device = '''''' +__load_module = '''''' +utils_device = '''''' device = '''''' -___from_numpy = '''''' -___as_tensor = '''''' -torch = '''''' -inspect = '''''' +___from_numpy = '''''' +___as_tensor = '''''' +inspect = '''''' def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = True - __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards + __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards __guard_hit = __guard_hit and ___check_global_state() __guard_hit = __guard_hit and ___check_torch_function_mode_stack() __guard_hit = __guard_hit and check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) @@ -186,9 +121,8 @@ def __guard_0_for_forward(L, G, **___kwargs_ignored): __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) __guard_hit = __guard_hit and check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[10], stride=[1]) __guard_hit = __guard_hit and hasattr(L['b'], '_dynamo_dynamic_indices') == False - __guard_hit = __guard_hit and check_no_aliasing(L['a'], L['b']) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4356991920) - __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4360494256) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'], 4389910736) + __guard_hit = __guard_hit and ___check_obj_id(G['torch'].abs, 4391224080) return __guard_hit # Note: please refer to the graph code in __compiled_fn_1*.py. 
@@ -201,9 +135,11 @@ def __compiled_fn_1(*args, **kwargs): pass def __transformed_code_0_for_forward(self, a, b): - __temp_2, __temp_3 = __compiled_fn_1(a, b) - x = __temp_2 - if __temp_3: + tmp_1 = a + tmp_2 = b + graph_out_0 = __compiled_fn_1(a, b) + x = graph_out_0[1] + if graph_out_0[0]: return __resume_at_30_2(b, x) return __resume_at_38_3(b, x) diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py index 06186698..f1126e79 100644 --- a/tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1.Captured_Graph.0.py @@ -1,11 +1,11 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[5]"): - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 - add: "f32[5]" = l_x_ + 1; l_x_ = None - return (add,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[5]"): + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 + add: "f32[5]" = l_x_ + 1; l_x_ = None + return (add,) + \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py b/tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py index 06186698..f1126e79 100644 --- a/tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py +++ b/tests/depyf_output/multiprocessing/__compiled_fn_1.pre_insert_deferred_runtime_asserts___compiled_fn_1.0.py @@ -1,11 +1,11 @@ from __future__ import annotations - - - -def forward(self, L_x_: "f32[5]"): - l_x_ = L_x_ - - # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 - add: "f32[5]" = l_x_ + 1; l_x_ = None - return (add,) - \ No newline at end of file +import torch +from torch import device +class GraphModule(torch.nn.Module): + def forward(self, L_x_: "f32[5]"): + l_x_ = L_x_ + + # File: /Users/youkaichao/data/DeepLearning/depyf/tests/test_pytorch/test_mp.py:5 in f, code: return x + 1 + add: "f32[5]" = l_x_ + 1; l_x_ = None + return (add,) + \ No newline at end of file diff --git a/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py b/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py index b1630a3a..b4d112d9 100644 --- a/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py +++ b/tests/depyf_output/multiprocessing/__transformed_code_0_for_f.py @@ -1,3 +1,4 @@ def __transformed_code_0_for_f(x): - __temp_2, = __compiled_fn_1(x) - return __temp_2 + tmp_1 = x + graph_out_0 = __compiled_fn_1(x) + return graph_out_0[0] diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_0.py b/tests/depyf_output/multiprocessing/full_code_for_f_0.py index b78a1e4a..b562daf6 100644 --- a/tests/depyf_output/multiprocessing/full_code_for_f_0.py +++ b/tests/depyf_output/multiprocessing/full_code_for_f_0.py @@ -2,67 +2,34 @@ # Note: the following variables are used inside the guard function. 
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x11fba7880>'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-FloatPow = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-FloatTrueDiv = ''''''
-ToFloat = ''''''
-OpaqueUnaryFn_cos = ''''''
-OpaqueUnaryFn_cosh = ''''''
-OpaqueUnaryFn_acos = ''''''
-OpaqueUnaryFn_sin = ''''''
-OpaqueUnaryFn_sinh = ''''''
-OpaqueUnaryFn_asin = ''''''
-OpaqueUnaryFn_tan = ''''''
-OpaqueUnaryFn_tanh = ''''''
-OpaqueUnaryFn_atan = ''''''
-OpaqueUnaryFn_sqrt = ''''''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x16fc529e0>'''
+IsNonOverlappingAndDenseIndicator = ''''''
+cast_symbool_to_symint_guardless = ''''''
+math = ''''''
+torch = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x11fb769e0>'''
+___dict_contains = '''. at 0x16fc52830>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___normalize_range_iter = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
 __numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
+__load_module = ''''''
+utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
+inspect = ''''''
 
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
@@ -79,8 +46,9 @@ def __compiled_fn_1(*args, **kwargs):
     pass
 
 def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
+    tmp_1 = x
+    graph_out_0 = __compiled_fn_1(x)
+    return graph_out_0[0]
 
 
 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_1.py b/tests/depyf_output/multiprocessing/full_code_for_f_1.py
index 1cbb4306..3c69e921 100644
--- a/tests/depyf_output/multiprocessing/full_code_for_f_1.py
+++ b/tests/depyf_output/multiprocessing/full_code_for_f_1.py
@@ -2,67 +2,34 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1224a7880>'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-FloatPow = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-FloatTrueDiv = ''''''
-ToFloat = ''''''
-OpaqueUnaryFn_cos = ''''''
-OpaqueUnaryFn_cosh = ''''''
-OpaqueUnaryFn_acos = ''''''
-OpaqueUnaryFn_sin = ''''''
-OpaqueUnaryFn_sinh = ''''''
-OpaqueUnaryFn_asin = ''''''
-OpaqueUnaryFn_tan = ''''''
-OpaqueUnaryFn_tanh = ''''''
-OpaqueUnaryFn_atan = ''''''
-OpaqueUnaryFn_sqrt = ''''''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x1517529e0>'''
+IsNonOverlappingAndDenseIndicator = ''''''
+cast_symbool_to_symint_guardless = ''''''
+math = ''''''
+torch = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x1224769e0>'''
+___dict_contains = '''. at 0x151752830>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___normalize_range_iter = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
 __numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
+__load_module = ''''''
+utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
+inspect = ''''''
 
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards
    __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
@@ -79,8 +46,9 @@ def __compiled_fn_1(*args, **kwargs):
     pass
 
 def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
+    tmp_1 = x
+    graph_out_0 = __compiled_fn_1(x)
+    return graph_out_0[0]
 
 
 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
diff --git a/tests/depyf_output/multiprocessing/full_code_for_f_2.py b/tests/depyf_output/multiprocessing/full_code_for_f_2.py
index f7cffe52..97f8dac6 100644
--- a/tests/depyf_output/multiprocessing/full_code_for_f_2.py
+++ b/tests/depyf_output/multiprocessing/full_code_for_f_2.py
@@ -2,67 +2,34 @@
 # Note: the following variables are used inside the guard function.
 ___check_tensors = '''None'''
 ___check_tensors_verbose = '''None'''
-___check_global_state = ''''''
-___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x10f5a7880>'''
-Abs = ''''''
-Eq = ''''''
-Ne = ''''''
-Gt = ''''''
-Lt = ''''''
-Le = ''''''
-Ge = ''''''
-Min = ''''''
-Max = ''''''
-Mod = ''''''
-PythonMod = ''''''
-FloorDiv = ''''''
-TrueDiv = ''''''
-PowByNatural = ''''''
-IsNonOverlappingAndDenseIndicator = ''''''
-floor = ''''''
-ceiling = ''''''
-FloorToInt = ''''''
-FloatPow = ''''''
-CeilToInt = ''''''
-cast_symbool_to_symint_guardless = ''''''
-RoundToInt = ''''''
-RoundDecimal = ''''''
-TruncToInt = ''''''
-IntTrueDiv = ''''''
-FloatTrueDiv = ''''''
-ToFloat = ''''''
-OpaqueUnaryFn_cos = ''''''
-OpaqueUnaryFn_cosh = ''''''
-OpaqueUnaryFn_acos = ''''''
-OpaqueUnaryFn_sin = ''''''
-OpaqueUnaryFn_sinh = ''''''
-OpaqueUnaryFn_asin = ''''''
-OpaqueUnaryFn_tan = ''''''
-OpaqueUnaryFn_tanh = ''''''
-OpaqueUnaryFn_atan = ''''''
-OpaqueUnaryFn_sqrt = ''''''
+___check_global_state = ''''''
+___check_torch_function_mode_stack = '''.check_torch_function_mode_stack at 0x167a529e0>'''
+IsNonOverlappingAndDenseIndicator = ''''''
+cast_symbool_to_symint_guardless = ''''''
+math = ''''''
+torch = ''''''
 ___check_type_id = ''''''
 ___check_obj_id = ''''''
 ___odict_getitem = ''''''
-___key_to_id = ''''''
+___key_to_id = ''''''
 ___dict_version = ''''''
-___dict_contains = '''. at 0x10f5769e0>'''
+___dict_contains = '''. at 0x167a52830>'''
 ___tuple_iterator_len = ''''''
-___tuple_iterator_getitem = ''''''
-___get_torch_function_mode_stack_at = ''''''
+___normalize_range_iter = ''''''
+___tuple_iterator_getitem = ''''''
+___get_torch_function_mode_stack_at = ''''''
 __math_isnan = ''''''
 __numpy_isnan = ''''''
 inf = '''inf'''
-__load_module = ''''''
-utils_device = ''''''
+__load_module = ''''''
+utils_device = ''''''
 device = ''''''
-___from_numpy = ''''''
-___as_tensor = ''''''
-torch = ''''''
-inspect = ''''''
+___from_numpy = ''''''
+___as_tensor = ''''''
+inspect = ''''''
 
 def __guard_0_for_f(L, G, **___kwargs_ignored):
     __guard_hit = True
-    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:483 in init_ambient_guards
+    __guard_hit = __guard_hit and utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:551 in init_ambient_guards
     __guard_hit = __guard_hit and ___check_global_state()
     __guard_hit = __guard_hit and ___check_torch_function_mode_stack()
     __guard_hit = __guard_hit and check_tensor(L['x'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), torch.float32, device=None, requires_grad=False, size=[5], stride=[1])
@@ -79,8 +46,9 @@ def __compiled_fn_1(*args, **kwargs):
     pass
 
 def __transformed_code_0_for_f(x):
-    __temp_2, = __compiled_fn_1(x)
-    return __temp_2
+    tmp_1 = x
+    graph_out_0 = __compiled_fn_1(x)
+    return graph_out_0[0]
 
 
 # Note: if there is a transformed version below, this function might well not be executed directly. Please check the transformed version if possible.
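Reviewer note on [PATCH 3/5]: fixtures such as __transformed_code_0_for_f.py, __compiled_fn_1.Captured_Graph.0.py, and full_code_for_f_*.py are dumps produced by depyf.prepare_debug while a torch.compile-ed function runs, so the regenerated files above simply track the guard and bytecode output of a newer torch build (e.g. the graph_out_0[...] indexing instead of tuple unpacking). The snippet below is a minimal sketch of how such dumps can be reproduced, not part of the patch; the dump directory name and the toy function f are placeholders, and the exact file contents depend on the installed PyTorch version.

    # Minimal sketch (not part of this patch): regenerate dumps similar to the
    # tests/depyf_output/ fixtures above. The dump path and toy function are
    # illustrative; guard variables and indexing track the torch version.
    import torch
    import depyf

    def f(x):
        return x + 1

    compiled_f = torch.compile(f)

    with depyf.prepare_debug("./depyf_dump"):
        # Files like full_code_for_f_0.py and __compiled_fn_1.Captured_Graph.0.py
        # are written into ./depyf_dump while the compiled function executes.
        compiled_f(torch.randn(5))
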
From e379286a302abde2808d249ba186bbe7a8235cc0 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 20 Apr 2025 15:09:33 +0800
Subject: [PATCH 4/5] fix copy instruction (#84)

Signed-off-by: youkaichao
---
 depyf/decompiler.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/depyf/decompiler.py b/depyf/decompiler.py
index 6caf20b0..906d463a 100644
--- a/depyf/decompiler.py
+++ b/depyf/decompiler.py
@@ -808,9 +808,13 @@ def SWAP(self, inst: Instruction):
         self.state.stack[- n] = value
 
     def COPY(self, inst: Instruction):
-        # not tested, don't know how to generate this instruction
         n = inst.argval
-        value = self.state.stack[-1 - n]
+        # COPY argument is 1-based
+        # see https://discuss.python.org/t/the-oparg-of-the-new-copy-opcode-is-not-zero-based/22110
+        # n == 0 is a silent error and will be ignored by the interpreter
+        if n == 0:
+            return
+        value = self.state.stack[-1 - (n - 1)]
         self.state.stack.append(value)
 
     def POP_TOP(self, inst: Instruction):

From 2612c16584d7360a511044a313eac02e15f46831 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 20 Apr 2025 16:01:00 +0800
Subject: [PATCH 5/5] bump version

Signed-off-by: youkaichao
---
 depyf/VERSION.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/depyf/VERSION.txt b/depyf/VERSION.txt
index 47d04a52..3f46c4d1 100644
--- a/depyf/VERSION.txt
+++ b/depyf/VERSION.txt
@@ -1 +1 @@
-0.18.0
\ No newline at end of file
+0.19.0
\ No newline at end of file
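Reviewer note on [PATCH 4/5]: CPython's COPY opcode takes a 1-based operand, so COPY(1) duplicates the top of the stack and COPY(n) pushes a copy of the n-th item counted from the top, which is what the corrected indexing stack[-1 - (n - 1)] (i.e. stack[-n]) implements. The standalone sketch below mirrors that semantics for illustration only; copy_op and the sample list are hypothetical helpers, not depyf internals.

    # Standalone illustration of the 1-based COPY semantics the patch implements;
    # copy_op and the sample stack are hypothetical, not part of depyf.
    def copy_op(stack, n):
        if n == 0:              # oparg 0 carries no meaning; the patch ignores it
            return
        stack.append(stack[-1 - (n - 1)])   # equivalent to stack[-n]

    s = [10, 20, 30]
    copy_op(s, 1)
    assert s == [10, 20, 30, 30]        # COPY(1) duplicates the top of the stack
    copy_op(s, 3)
    assert s == [10, 20, 30, 30, 20]    # COPY(3) copies the third item from the top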