Mock Version: 3.5
Mock Version: 3.5
Mock Version: 3.5
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target x86_64 --nodeps /builddir/build/SPECS/intel-extension-for-pytorch.spec'], chrootPath='/var/lib/mock/dist-an23-epao-build-298499-65835/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=86400uid=990gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target x86_64 --nodeps /builddir/build/SPECS/intel-extension-for-pytorch.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: x86_64
Building for target x86_64
setting SOURCE_DATE_EPOCH=1693353600
Wrote: /builddir/build/SRPMS/intel-extension-for-pytorch-2.0.100-1.an23.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target x86_64 --nodeps /builddir/build/SPECS/intel-extension-for-pytorch.spec'], chrootPath='/var/lib/mock/dist-an23-epao-build-298499-65835/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=86400uid=990gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target x86_64 --nodeps /builddir/build/SPECS/intel-extension-for-pytorch.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: x86_64
Building for target x86_64
setting SOURCE_DATE_EPOCH=1693353600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.XYcEJL
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf intel-extension-for-pytorch-2.0.100+cpu
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/intel-extension-for-pytorch-2.0.100cpu.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd intel-extension-for-pytorch-2.0.100+cpu
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-use-MKLROOT-env-to-set-mkl-path.patch + cd /builddir/build/BUILD + rm -rf 'mkl*' + unzip /builddir/build/SOURCES/mkl_include-2023.2.0-py2.py3-none-manylinux1_x86_64.whl Archive: /builddir/build/SOURCES/mkl_include-2023.2.0-py2.py3-none-manylinux1_x86_64.whl inflating: mkl_include-2023.2.0.data/data/include/i_malloc.h inflating: mkl_include-2023.2.0.data/data/include/mkl.fi inflating: mkl_include-2023.2.0.data/data/include/mkl.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blacs.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blas.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_blas.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_64.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload_ilp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload_ilp64_no_array_check.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload_lp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_offload_lp64_no_array_check.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_variant_ilp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_variant_ilp64_no_array_check.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_variant_lp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_blas_omp_variant_lp64_no_array_check.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_cblas.h inflating: mkl_include-2023.2.0.data/data/include/mkl_cblas_64.h inflating: mkl_include-2023.2.0.data/data/include/mkl_cdft.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_cdft.h inflating: mkl_include-2023.2.0.data/data/include/mkl_cdft_types.h inflating: mkl_include-2023.2.0.data/data/include/mkl_cluster_sparse_solver.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_cluster_sparse_solver.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_cluster_sparse_solver.h inflating: mkl_include-2023.2.0.data/data/include/mkl_compact.h inflating: mkl_include-2023.2.0.data/data/include/mkl_df.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_df.h inflating: mkl_include-2023.2.0.data/data/include/mkl_df_defines.h inflating: mkl_include-2023.2.0.data/data/include/mkl_df_functions.h inflating: mkl_include-2023.2.0.data/data/include/mkl_df_types.h inflating: mkl_include-2023.2.0.data/data/include/mkl_dfti.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_dfti.h inflating: mkl_include-2023.2.0.data/data/include/mkl_dfti_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_dfti_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_blas.h inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_blas_kernels.h inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_call.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_call.h inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_lapack.h inflating: mkl_include-2023.2.0.data/data/include/mkl_direct_types.h inflating: mkl_include-2023.2.0.data/data/include/mkl_dss.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_dss.fi inflating: 
mkl_include-2023.2.0.data/data/include/mkl_dss.h inflating: mkl_include-2023.2.0.data/data/include/mkl_graph.h inflating: mkl_include-2023.2.0.data/data/include/mkl_jit_blas.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_jit_blas_ilp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_jit_blas_lp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack.h inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_offload_ilp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_offload_lp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_variant_ilp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapack_omp_variant_lp64.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_lapacke.h inflating: mkl_include-2023.2.0.data/data/include/mkl_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_pardiso.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_pardiso.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_pardiso.h inflating: mkl_include-2023.2.0.data/data/include/mkl_pblas.h inflating: mkl_include-2023.2.0.data/data/include/mkl_poisson.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_poisson.h inflating: mkl_include-2023.2.0.data/data/include/mkl_rci.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_rci.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_rci.h inflating: mkl_include-2023.2.0.data/data/include/mkl_scalapack.h inflating: mkl_include-2023.2.0.data/data/include/mkl_service.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_service.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_service.h inflating: mkl_include-2023.2.0.data/data/include/mkl_solvers_ee.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_solvers_ee.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_solvers_ee.h inflating: mkl_include-2023.2.0.data/data/include/mkl_sparse_handle.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_sparse_handle.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_sparse_handle.h inflating: mkl_include-2023.2.0.data/data/include/mkl_sparse_qr.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_sparse_qr.h inflating: mkl_include-2023.2.0.data/data/include/mkl_spblas.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_spblas.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_spblas.h inflating: mkl_include-2023.2.0.data/data/include/mkl_spblas_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_spblas_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_trans.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_trans.h inflating: mkl_include-2023.2.0.data/data/include/mkl_trans_names.h inflating: mkl_include-2023.2.0.data/data/include/mkl_trig_transforms.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_trig_transforms.h inflating: mkl_include-2023.2.0.data/data/include/mkl_types.h inflating: mkl_include-2023.2.0.data/data/include/mkl_version.h inflating: 
mkl_include-2023.2.0.data/data/include/mkl_vml.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vml.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_vml.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_defines.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_functions.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_omp_variant.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vml_types.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_defines.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_functions.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_functions_64.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_omp_variant.f90 inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_omp_variant.h inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_subroutine.fi inflating: mkl_include-2023.2.0.data/data/include/mkl_vsl_types.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3-mpi.f03 inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3-mpi.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3-mpi_mkl.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3.f inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3.f03 inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3_mkl.f inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3_mkl.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw3_mkl_f77.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw_f77.i inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw_mpi.h inflating: mkl_include-2023.2.0.data/data/include/fftw/fftw_threads.h inflating: mkl_include-2023.2.0.data/data/include/fftw/rfftw.h inflating: mkl_include-2023.2.0.data/data/include/fftw/rfftw_mpi.h inflating: mkl_include-2023.2.0.data/data/include/fftw/rfftw_threads.h inflating: mkl_include-2023.2.0.data/data/include/fftw/offload/fftw3_omp_offload.f90 inflating: mkl_include-2023.2.0.data/data/include/fftw/offload/fftw3_omp_offload.h inflating: mkl_include-2023.2.0.data/data/include/fftw/offload/fftw3_omp_offload_common.h inflating: mkl_include-2023.2.0.data/data/include/intel64/ilp64/blas95.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/ilp64/f95_precision.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/ilp64/lapack95.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/ilp64/mkl_service.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/lp64/blas95.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/lp64/f95_precision.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/lp64/lapack95.mod inflating: mkl_include-2023.2.0.data/data/include/intel64/lp64/mkl_service.mod inflating: mkl_include-2023.2.0.dist-info/LICENSE.txt inflating: mkl_include-2023.2.0.dist-info/METADATA 
inflating: mkl_include-2023.2.0.dist-info/WHEEL inflating: mkl_include-2023.2.0.dist-info/top_level.txt inflating: mkl_include-2023.2.0.dist-info/RECORD + unzip /builddir/build/SOURCES/mkl_static-2023.2.0-py2.py3-none-manylinux1_x86_64.whl Archive: /builddir/build/SOURCES/mkl_static-2023.2.0-py2.py3-none-manylinux1_x86_64.whl inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blacs_intelmpi_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blacs_intelmpi_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blacs_openmpi_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blacs_openmpi_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blas95_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_blas95_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_cdft_core.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_core.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_gf_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_gf_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_gnu_thread.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_intel_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_intel_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_intel_thread.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_lapack95_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_lapack95_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_pgi_thread.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_scalapack_ilp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_scalapack_lp64.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_sequential.a inflating: mkl_static-2023.2.0.data/data/lib/libmkl_tbb_thread.a inflating: mkl_static-2023.2.0.dist-info/LICENSE.txt inflating: mkl_static-2023.2.0.dist-info/METADATA inflating: mkl_static-2023.2.0.dist-info/WHEEL inflating: mkl_static-2023.2.0.dist-info/top_level.txt inflating: mkl_static-2023.2.0.dist-info/RECORD + mkdir mkl + cp -r mkl_include-2023.2.0.data/data/include mkl/ + cp -r mkl_static-2023.2.0.data/data/lib mkl/ + RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.1D9bEp + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' + export FCFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib64: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd intel-extension-for-pytorch-2.0.100+cpu + export 
MKLROOT=/builddir/build/BUILD/mkl + MKLROOT=/builddir/build/BUILD/mkl + export 'TORCH_CUDA_ARCH_LIST=6.0;6.1;7.0;7.5;8.0;8.6' + TORCH_CUDA_ARCH_LIST='6.0;6.1;7.0;7.5;8.0;8.6' + export CUDAARCHS=all + CUDAARCHS=all + export CMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc + CMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc + python3 setup.py build No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' Building Intel Extension for PyTorch. Version: 2.0.100+git25b7212 running build running build_py copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/frontend.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/_version.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/_auto_kernel_selection.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/_cpu_isa.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/auto_ipex.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/autocast/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/autocast copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/autocast/_autocast_mode.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/autocast copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/autocast/_grad_scaler.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/autocast copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/__main__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/objective.py -> 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/conf/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/conf/config.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/conf/dotdict.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/example/resnet50.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/example copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/strategy/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/strategy/grid.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/strategy/random.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/hypertune/strategy/strategy.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/__main__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/cpu_info.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/launcher_base.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/launcher_distributed.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/launch/launcher_multi_instances.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/launch copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/runtime/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/runtime copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/runtime/cpupool.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/runtime copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/runtime/multi_stream.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/runtime copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/runtime/runtime_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/runtime copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/cpu/runtime/task.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/cpu/runtime copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/jit/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/jit copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/jit/_trace.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/jit copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/functional/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/functional copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/functional/_embeddingbag.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/functional copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/functional/_roi_align.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/functional copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/functional/_tensor_method.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/functional copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/functional/interaction.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/functional copying 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/modules/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/modules/_roi_align.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/modules/frozen_batch_norm.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/modules/linear_fuse_eltwise.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/modules/merged_embeddingbag.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/utils/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/utils/_model_convert.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/utils/_weight_cast.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/nn/utils/_weight_prepack.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/nn/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/optim/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/optim copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/optim/_functional.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/optim copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/optim/_lamb.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/optim copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/optim/_optimizer_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/optim copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_autotune.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_module_swap_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_qconfig.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_quantization_state.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_quantization_state_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_quantize.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_quantize_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_recipe.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/quantization/_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/quantization copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/utils/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/utils/_custom_fx_tracer.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/utils/channels_last_1d.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/utils/linear_bn_folding.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/utils/verbose.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/utils copying 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/_utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/cpp_extension.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/lazy_init.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/memory.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/random.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/single_card.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/streams.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/utils.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/amp/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu/amp copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/amp/autocast_mode.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu/amp copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/intrinsic/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/intrinsic/modules/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/xpu/intrinsic/modules/intrinsic.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic/modules copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/tpp/__init__.py -> 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/tpp copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/tpp/fused_bert.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/tpp copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/tpp/optim.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/tpp copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/tpp/utils/__init__.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/tpp/utils copying /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/tpp/utils/blocked_layout.py -> /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/tpp/utils running build_ext running build_clib -- The C compiler identification is GNU 12.2.1 -- The CXX compiler identification is GNU 12.2.1 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/gcc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/g++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found CUDA: /usr/local/cuda (found version "12.1") -- The CUDA compiler identification is NVIDIA 12.1.105 -- Detecting CUDA compiler ABI info -- Detecting CUDA compiler ABI info - done -- Check for working CUDA compiler: /usr/local/cuda/bin/nvcc - skipped -- Detecting CUDA compile features -- Detecting CUDA compile features - done -- Caffe2: CUDA detected: 12.1 -- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc -- Caffe2: CUDA toolkit directory: /usr/local/cuda -- Caffe2: Header version is: 12.1 -- USE_CUDNN is set to 0. Compiling without cuDNN support -- Added CUDA NVCC flags for: -gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 -- Found Torch: /usr/lib64/python3.10/site-packages/torch/lib/libtorch.so (Required is at least version "2.0") CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake:385 (message): In the future we will require one to explicitly pass TORCH_CUDA_ARCH_LIST to cmake instead of implicitly setting it as an env variable. This will become a FATAL_ERROR in future version of pytorch. Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake:362 (torch_cuda_get_nvcc_gencode_flag) /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake:88 (include) /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:68 (find_package) CMakeLists.txt:20 (find_package) CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message): static library kineto_LIBRARY-NOTFOUND not found. 
Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found) CMakeLists.txt:20 (find_package) Release build. -- cmake version: 3.26.3 CMake Deprecation Warning at third_party/ideep/mkl-dnn/CMakeLists.txt:36 (cmake_policy): The OLD behavior for policy CMP0025 will be removed from a future version of CMake. The cmake-policies(7) manual explains that the OLD behaviors of all policies are deprecated and that a policy should be set to OLD only under specific short-term circumstances. Projects should be ported to the NEW behavior and not rely on setting a policy to OLD. -- DNNL_TARGET_ARCH: X64 -- DNNL_LIBRARY_NAME: dnnl -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success -- Found Threads: TRUE -- Found OpenMP_C: -fopenmp (found version "4.5") -- Found OpenMP_CXX: -fopenmp (found version "4.5") -- Found OpenMP: TRUE (found version "4.5") -- Could NOT find Doxyrest (missing: DOXYREST_EXECUTABLE) -- Found PythonInterp: /usr/bin/python3 (found suitable version "3.10.12", minimum required is "2.7") -- Could NOT find Sphinx (missing: SPHINX_EXECUTABLE) -- Found Git: /usr/bin/git (found version "2.40.1") -- Enabled workload: TRAINING -- Enabled primitives: ALL -- Enabled primitive CPU ISA: ALL -- Enabled primitive GPU ISA: ALL -- Primitive cache is enabled -- The ASM compiler identification is GNU -- Found assembler: /usr/bin/gcc -- Looking for /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph.h -- Looking for /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph.h - found -- Looking for /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph_types.h -- Looking for /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph_types.h - found -- Looking for C++ include /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph.hpp -- Looking for C++ include /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include/oneapi/dnnl/dnnl_graph.hpp - found -- Could NOT find Doxygen (missing: DOXYGEN_EXECUTABLE) -- Cannot find Doxygen package -- DNNL_GRAPH_BUILD_FOR_CI is set to be OFF -- Compiling oneDNN Graph with CPU runtime OMP support -- Compiling oneDNN Graph with GPU runtime NONE support -- Graph compiler backend is disabled. 
-- Set version definitions to /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/src/utils/verbose.cpp -- Compiled partition cache is enabled -- Performing Test C_HAS_AVX2_1 -- Performing Test C_HAS_AVX2_1 - Failed -- Performing Test C_HAS_AVX2_2 -- Performing Test C_HAS_AVX2_2 - Success -- Performing Test CXX_HAS_AVX2_1 -- Performing Test CXX_HAS_AVX2_1 - Failed -- Performing Test CXX_HAS_AVX2_2 -- Performing Test CXX_HAS_AVX2_2 - Success -- Performing Test C_HAS_AVX2_VNNI_1 -- Performing Test C_HAS_AVX2_VNNI_1 - Failed -- Performing Test C_HAS_AVX2_VNNI_2 -- Performing Test C_HAS_AVX2_VNNI_2 - Success -- Performing Test CXX_HAS_AVX2_VNNI_1 -- Performing Test CXX_HAS_AVX2_VNNI_1 - Failed -- Performing Test CXX_HAS_AVX2_VNNI_2 -- Performing Test CXX_HAS_AVX2_VNNI_2 - Success -- Performing Test C_HAS_AVX512_1 -- Performing Test C_HAS_AVX512_1 - Failed -- Performing Test C_HAS_AVX512_2 -- Performing Test C_HAS_AVX512_2 - Success -- Performing Test CXX_HAS_AVX512_1 -- Performing Test CXX_HAS_AVX512_1 - Failed -- Performing Test CXX_HAS_AVX512_2 -- Performing Test CXX_HAS_AVX512_2 - Success -- Performing Test C_HAS_AVX512_VNNI_1 -- Performing Test C_HAS_AVX512_VNNI_1 - Failed -- Performing Test C_HAS_AVX512_VNNI_2 -- Performing Test C_HAS_AVX512_VNNI_2 - Success -- Performing Test CXX_HAS_AVX512_VNNI_1 -- Performing Test CXX_HAS_AVX512_VNNI_1 - Failed -- Performing Test CXX_HAS_AVX512_VNNI_2 -- Performing Test CXX_HAS_AVX512_VNNI_2 - Success -- Performing Test C_HAS_AVX512_BF16_1 -- Performing Test C_HAS_AVX512_BF16_1 - Failed -- Performing Test C_HAS_AVX512_BF16_2 -- Performing Test C_HAS_AVX512_BF16_2 - Success -- Performing Test CXX_HAS_AVX512_BF16_1 -- Performing Test CXX_HAS_AVX512_BF16_1 - Failed -- Performing Test CXX_HAS_AVX512_BF16_2 -- Performing Test CXX_HAS_AVX512_BF16_2 - Success -- Performing Test C_HAS_AMX_1 -- Performing Test C_HAS_AMX_1 - Success -- Performing Test CXX_HAS_AMX_1 -- Performing Test CXX_HAS_AMX_1 - Success -- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT -- Performing Test COMPILER_SUPPORTS_NO_AVX256_SPLIT - Success -- IPEX_CPU_CPP_TPP_SRCS: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/bert/fused_bert.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/common_loops.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/init.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/jit_compile.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/optim.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp CMake Warning at cmake/ClangFormat.cmake:19 (message): Please install clang-format-12 before contributing to IPEX! 
Call Stack (most recent call first): csrc/cpu/CMakeLists.txt:103 (include) -- Using ATen parallel backend: OMP -- -- ******** General Summary ******** -- General: -- CMake version : 3.26.3 -- CMake command : /usr/bin/cmake -- System : Linux -- Platform : Linux-4.18.0-305.19.1.el8_4.x86_64-x86_64-with-glibc2.36 -- Target name : intel_extension_for_pytorch -- Target version : 2.0.100 -- Install path : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch -- Build type : Release -- Options: -- BUILD_WITH_CPU : ON -- BUILD_WITH_XPU : OFF -- BUILD_NO_CLANGFORMAT : OFF -- BUILD_STATS : OFF -- BUILD_STRIPPED_BIN : OFF -- -- ******** Summary on CPU ******** -- General: -- C compiler : /usr/bin/gcc -- C++ compiler : /usr/bin/g++ -- C++ compiler ID : GNU -- C++ compiler version : 12.2.1 -- CXX flags : -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -Wno-narrowing -Wall -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-ignored-qualifiers -Wno-stringop-overflow -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fopenmp -faligned-new -Wno-unused-but-set-variable -Wno-uninitialized -fno-math-errno -fno-trapping-math -DNDEBUG -D_GLIBCXX_USE_CXX11_ABI=1 -- Compile definitions : AT_PARALLEL_OPENMP=1 -- CXX Linker options : -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -Wl,-Bsymbolic-functions -Wl,--disable-new-dtags -- Link libraries : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/libxsmm/lib/libxsmm.a;dnnl_graph;-Wl,--start-group;/builddir/build/BUILD/mkl/lib/libmkl_intel_lp64.a;/builddir/build/BUILD/mkl/lib/libmkl_core.a;/builddir/build/BUILD/mkl/lib/libmkl_gnu_thread.a;-Wl,--end-group;/usr/lib64/python3.10/site-packages/torch/lib/libtorch_cpu.so;/usr/lib64/python3.10/site-packages/torch/lib/libc10.so -- Torch version : 2.0.1 -- Torch include : /usr/lib64/python3.10/site-packages/torch/include;/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -- oneDNN include : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include -- oneMKL include : /builddir/build/BUILD/mkl/include -- Options: -- BUILD_STATIC_ONEMKL : ON -- IPEX_DISP_OP : OFF -- -- ******************************** Intel oneMKL found. 
-- Configuring done (30.1s) -- Generating done (0.2s) CMake Warning: Manually-specified variables were not used by the project: IPEX_INSTALL_LIBDIR LIBIPEX_GITREV LIBIPEX_VERSION -- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu -- The C compiler identification is GNU 12.2.1 -- The CXX compiler identification is GNU 12.2.1 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/gcc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/g++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found CUDA: /usr/local/cuda (found version "12.1") -- The CUDA compiler identification is NVIDIA 12.1.105 -- Detecting CUDA compiler ABI info -- Detecting CUDA compiler ABI info - done -- Check for working CUDA compiler: /usr/local/cuda/bin/nvcc - skipped -- Detecting CUDA compile features -- Detecting CUDA compile features - done -- Caffe2: CUDA detected: 12.1 -- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc -- Caffe2: CUDA toolkit directory: /usr/local/cuda -- Caffe2: Header version is: 12.1 -- USE_CUDNN is set to 0. Compiling without cuDNN support CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake:385 (message): In the future we will require one to explicitly pass TORCH_CUDA_ARCH_LIST to cmake instead of implicitly setting it as an env variable. This will become a FATAL_ERROR in future version of pytorch. Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake:362 (torch_cuda_get_nvcc_gencode_flag) /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake:88 (include) /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:68 (find_package) CMakeLists.txt:25 (find_package) -- Added CUDA NVCC flags for: -gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message): static library kineto_LIBRARY-NOTFOUND not found. Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found) CMakeLists.txt:25 (find_package) -- Found Torch: /usr/lib64/python3.10/site-packages/torch/lib/libtorch.so -- Found Python: /usr/bin/python3.10 (found version "3.10.12") found components: Interpreter -- Performing Test CMAKE_HAVE_LIBC_PTHREAD -- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success -- Found Threads: TRUE Intel oneMKL found. 
-- Configuring done (11.6s) -- Generating done (0.0s) CMake Warning: Manually-specified variables were not used by the project: IPEX_INSTALL_LIBDIR IPEX_PROJ_NAME LIBIPEX_GITREV LIBIPEX_VERSION PROJECT_DIR PYTHON_INCLUDE_DIR PYTHON_PLATFORM_INFO -- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/tests/cpu/cpp -- The C compiler identification is GNU 12.2.1 -- The CXX compiler identification is GNU 12.2.1 -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working C compiler: /usr/bin/gcc - skipped -- Detecting C compile features -- Detecting C compile features - done -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done -- Check for working CXX compiler: /usr/bin/g++ - skipped -- Detecting CXX compile features -- Detecting CXX compile features - done -- Found CUDA: /usr/local/cuda (found version "12.1") -- The CUDA compiler identification is NVIDIA 12.1.105 -- Detecting CUDA compiler ABI info -- Detecting CUDA compiler ABI info - done -- Check for working CUDA compiler: /usr/local/cuda/bin/nvcc - skipped -- Detecting CUDA compile features -- Detecting CUDA compile features - done -- Caffe2: CUDA detected: 12.1 -- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc -- Caffe2: CUDA toolkit directory: /usr/local/cuda -- Caffe2: Header version is: 12.1 -- USE_CUDNN is set to 0. Compiling without cuDNN support CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake:385 (message): In the future we will require one to explicitly pass TORCH_CUDA_ARCH_LIST to cmake instead of implicitly setting it as an env variable. This will become a FATAL_ERROR in future version of pytorch. Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake:362 (torch_cuda_get_nvcc_gencode_flag) /usr/lib64/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake:88 (include) /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:68 (find_package) CMakeLists.txt:20 (find_package) -- Added CUDA NVCC flags for: -gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message): static library kineto_LIBRARY-NOTFOUND not found. 
Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found) CMakeLists.txt:20 (find_package) -- Found Torch: /usr/lib64/python3.10/site-packages/torch/lib/libtorch.so (Required is at least version "2.0") -- Found pybind11: /usr/lib64/python3.10/site-packages/torch/include -- pybind11 found -- -- ******** General Summary ******** -- General: -- CMake version : 3.26.3 -- CMake command : /usr/bin/cmake -- System : Linux -- Platform : Linux-4.18.0-305.19.1.el8_4.x86_64-x86_64-with-glibc2.36 -- Target name : intel_extension_for_pytorch -- Target version : 2.0.100 -- Install path : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch -- Build type : Release -- Options: -- BUILD_WITH_CPU : ON -- BUILD_WITH_XPU : OFF -- BUILD_NO_CLANGFORMAT : OFF -- BUILD_STATS : OFF -- BUILD_STRIPPED_BIN : OFF -- -- ******** Summary on Python ******** -- General: -- C compiler : /usr/bin/gcc -- C++ compiler : /usr/bin/g++ -- C++ compiler ID : GNU -- C++ compiler version : 12.2.1 -- CXX flags : -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GLIBCXX_USE_CXX11_ABI=1 -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1017\" -- CXX Linker options : -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -Wl,--disable-new-dtags -- Link libraries : intel-ext-pt-cpu -- Torch version : 2.0.1 -- Torch include : /usr/lib64/python3.10/site-packages/torch/include;/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -- Python include : /usr/include/python3.10 -- pybind11 include : /usr/lib64/python3.10/site-packages/torch/include -- -- *********************************** -- Configuring done (10.1s) Release build. CMake Warning at cmake/ClangFormat.cmake:19 (message): Please install clang-format-12 before contributing to IPEX! 
Call Stack (most recent call first): intel_extension_for_pytorch/csrc/CMakeLists.txt:33 (include) -- Generating done (0.0s) CMake Warning: Manually-specified variables were not used by the project: IPEX_INSTALL_LIBDIR LIBIPEX_GITREV LIBIPEX_VERSION PYTHON_EXECUTABLE -- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc [ 0%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/allocator.cpp.o [ 0%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/common.cpp.o [ 0%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/constant_cache.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/backend.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/engine.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/graph.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/batch_normalization.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/bfloat16.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/amx_tile_configure.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/brgemm/brgemm.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/dnnl_backend.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/binary.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/logical_tensor.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/bfloat16.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/fake/CMakeFiles/dnnl_graph_backend_fake.dir/fake_backend.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/op.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_batch_normalization_list.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/binary_injector_utils.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/debug.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_batch_normalization_utils.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/brgemm/brgemm_utils.cpp.o [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/broadcast_strategy.cpp.o [ 1%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/dnnl_shape_infer.cpp.o [ 1%] Creating directories for 'libxsmm' [ 1%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/brgemm/jit_brdgmm_kernel.cpp.o [ 2%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/fusion_info.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/id.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/cache_blob_id.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/layout_propagator.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/pm/nested_matcher.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/op_schema.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/concat.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/partition.cpp.o [ 3%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/brgemm/jit_brgemm_amx_uker.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/brgemm/jit_brgemm_kernel.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/partition_cache.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/cpu_barrier.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/pm/pass_manager.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/op_executable.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/convolution.cpp.o [ 4%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_binary_list.cpp.o [ 5%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/partition_hashing.cpp.o [ 5%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/cpu_isa_traits.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/convolution_pd.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/pm/pbuilder.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/partition_impl.cpp.o [ 6%] No download step for 'libxsmm' [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/cpu_reducer.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/subgraph.cpp.o [ 6%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/compile_ops.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_concat.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/rw_mutex.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/shape_infer.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/deconvolution.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/amx/jit_avx512_core_amx_copy_kern.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/utils.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/constant_propagation.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/dnnl_debug.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/stream.cpp.o [ 6%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/insert_ops.cpp.o [ 7%] No update step for 'libxsmm' [ 7%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/utils/CMakeFiles/dnnl_graph_utils.dir/verbose.cpp.o [ 7%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/tensor.cpp.o [ 7%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_convolution_list.cpp.o [ 7%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/amx/jit_avx512_core_amx_gemm_kern.cpp.o [ 7%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/dnnl_debug_autogenerated.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_deconvolution_list.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_gemm_bf16bf16f32_kern.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/layout_propagation.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_gemv_bf16bf16f32_kern.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/type_constraint.cpp.o [ 8%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/dnnl_threadpool.cpp.o [ 8%] No patch step for 'libxsmm' [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/lower.cpp.o [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/interface/CMakeFiles/dnnl_graph_common.dir/value.cpp.o [ 9%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/eltwise.cpp.o [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_eltwise_list.cpp.o [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_engine.cpp.o [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/memory_planning.cpp.o [ 9%] No configure step for 'libxsmm' [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/engine.cpp.o [ 9%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_24x8_copy_at_kern_autogen.cpp.o [ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_24x8_copy_an_kern_autogen.cpp.o [ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/experimental.cpp.o [ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_inner_product_list.cpp.o [ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_24x8_copy_bn_kern_autogen.cpp.o [ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/transform.cpp.o [ 10%] Performing build step for 'libxsmm' make[3]: warning: -j0 forced in submake: resetting jobserver mode. 
[ 10%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_24x8_copy_bt_kern_autogen.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/fpmath_mode.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_layer_normalization_list.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_48x8_copy_an_kern_autogen.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/gemm.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/inner_product.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/ittnotify.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/layer_normalization.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/lrn.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/matmul.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_lrn_list.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/passes/utils.cpp.o [ 11%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/binary_fusion.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_debug.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_desc.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_desc_wrapper.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_storage.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/bn_fusion.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/concat_fusion.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_tracking.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/memory_zero_pad.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/pooling.cpp.o [ 12%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/prelu.cpp.o [ 12%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/conv_block_fusion.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_attr.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_cache.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_pooling_list.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_48x8_copy_at_kern_autogen.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_desc_iface.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_exec_types.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_hashing.cpp.o [ 13%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/primitive_iface.cpp.o [ 14%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/conv_post_ops_fusion.cpp.o [ 14%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/convtranspose_fusion.cpp.o [ 14%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_48x8_copy_bn_kern_autogen.cpp.o [ 14%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/bf16/jit_avx512_core_s16_48x8_copy_bt_kern_autogen.cpp.o [ 14%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/query.cpp.o [ 15%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/reduction.cpp.o [ 15%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/reorder.cpp.o [ 15%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/resampling.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx2_f32_copy_an_kern_autogen.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/eltwise_fusion.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/interpolate_fusion.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx2_f32_copy_at_kern_autogen.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/rnn.cpp.o [ 16%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx2_f32_copy_bn_kern_autogen.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx2_f32_copy_bt_kern_autogen.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/rw_mutex.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/scratchpad.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/scratchpad_debug.cpp.o [ 16%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/serialization.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/shuffle.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/layernorm_fusion.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/softmax.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/stream.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/sum.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx2_kernel_sgemm_kern.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_common_gemm_f32.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/utils.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/verbose.cpp.o [ 17%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/matmul_fusion.cpp.o [ 17%] Building C object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/ittnotify/ittnotify_static.c.o [ 17%] Building C object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/ittnotify/jitprofiling.c.o [ 18%] Building ASM object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/CMakeFiles/dnnl_common.dir/ittnotify/ittptmark64.S.o [ 18%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/pool_fusion.cpp.o [ 18%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/quantize_fusion.cpp.o [ 18%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_f32_copy_an_kern_autogen.cpp.o [ 18%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_f32_copy_at_kern_part1_autogen.cpp.o [ 19%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_f32_copy_at_kern_part2_autogen.cpp.o [ 19%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_f32_copy_bn_kern_autogen.cpp.o [ 19%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_prelu_list.cpp.o [ 20%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/reduction_fusion.cpp.o [ 20%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/reorder_fusion.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_reduction_list.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_resampling_list.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_rnn_list.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_f32_copy_bt_kern_autogen.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx512_core_gemm_smalln_tn_f32_kern.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_f32_copy_an_kern_autogen.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_shuffle_list.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_softmax_list.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/shuffle_fusion.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/cpu_sum.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/single_op_pattern.cpp.o [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/float16.cpp.o [ 21%] Built target dnnl_graph_utils [ 21%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_convolution.cpp.o [ 22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_convolution_utils.cpp.o [ 22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_f32_copy_at_kern_autogen.cpp.o [ 22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_f32_copy_bn_kern_autogen.cpp.o [ 22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_inner_product.cpp.o [ 22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/softmax_fusion.cpp.o [ 
22%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_f32_copy_bt_kern_autogen.cpp.o ================================================================================ LIBXSMM master-1.17-2648 (Linux@iZ2ze8vdmdyl66lfybi1hzZ) -------------------------------------------------------------------------------- GNU Compiler Collection: gcc 12.2.1, and g++ 12.2.1 C / C++ target: -mfma -mavx512f -mavx512cd -mavx512dq -mavx512bw -mavx512vl Fortran Compiler is disabled or missing: no Fortran interface is built! -------------------------------------------------------------------------------- Environment: CC CXX -------------------------------------------------------------------------------- --- LIBXSMM build log [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_gemm_f32.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_gemv_t_f32_kern.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_inner_product_utils.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/src/backend/dnnl/CMakeFiles/dnnl_graph_backend_dnnl.dir/patterns/sum_fusion.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_kernel_b0_sgemm_kern_part1_autogen.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_kernel_b0_sgemm_kern_part2_autogen.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_x8s8s32x_conv_zp_src_pad_comp.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_kernel_sgemm_kern_part1_autogen.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_x8s8s32x_convolution.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_avx_kernel_sgemm_kern_part2_autogen.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_f32_copy_an_kern_autogen.cpp.o [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_f32_copy_at_kern_autogen.cpp.o [ 23%] Built target dnnl_graph_backend_fake [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_x8s8s32x_convolution_utils.cpp.o /usr/bin/ar: creating lib/libxsmmnoblas.a [ 23%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm_x8s8s32x_inner_product.cpp.o [ 24%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_f32_copy_bn_kern_autogen.cpp.o [ 24%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_f32_copy_bt_kern_autogen.cpp.o [ 24%] Building 
CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_gemv_n_f32_kern.cpp.o
In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/dnnl_thread.hpp:24,
                 from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/gemm_inner_product.cpp:18:
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:792:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 320 and 0 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 |         dst[i] = src[i];
      |         ~~~~~~~^~~~~~
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:795:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 0 and 112 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 |         dst[i] = src[i];
      |         ~~~~~~~^~~~~~
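The two -Wrestrict reports above point at the small copy loop in oneDNN's src/common/utils.hpp (line 202), which GCC turns into a __builtin_memcpy once it is inlined through memory_desc_init_by_blocking_desc. A simplified sketch of that pattern, for orientation only (this is not the verbatim oneDNN source):

    #include <cstddef>

    namespace sketch {
    // Stand-in for dnnl::impl::utils::array_copy; the diagnostic quotes its
    // body ("dst[i] = src[i];") at src/common/utils.hpp:202.
    template <typename T>
    inline void array_copy(T *dst, const T *src, std::size_t size) {
        for (std::size_t i = 0; i < size; ++i)
            dst[i] = src[i]; // GCC may lower this loop to a single memcpy
    }
    } // namespace sketch

The implausibly large byte counts in the messages suggest the optimizer is reasoning about an unreachable value range rather than a real overlap; these are warnings, not errors, and the build continues.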
[ 24%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/nchw_pooling.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ncsp_batch_normalization.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/nhwc_pooling.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/nspc_batch_normalization.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_gemv_t_f32_kern.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_kernel_b0_sgemm_kern_autogen.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/f32/jit_sse41_kernel_sgemm_kern_autogen.cpp.o
In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/engine_id.hpp:23,
                 from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/engine.hpp:27,
                 from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_engine.hpp:26,
                 from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_list.cpp:17:
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:792:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 320 and 0 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 |         dst[i] = src[i];
      |         ~~~~~~~^~~~~~
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:795:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 0 and 112 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 |         dst[i] = src[i];
      |         ~~~~~~~^~~~~~
[ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/platform.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/gemm_driver.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/primitive_attr_postops.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_batch_normalization.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_binary.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/gemm_info.cpp.o [ 25%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_convolution.cpp.o [ 26%]
Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_convolution_int8.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_deconvolution.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_eltwise.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_inner_product.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_inner_product_int8.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_layer_normalization.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_lrn.cpp.o [ 26%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_pooling.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_prelu.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_reduction.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_resampling.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_shuffle.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/ref_softmax.cpp.o [ 27%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/scale_utils.cpp.o [ 28%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/gemm_pack.cpp.o [ 28%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/gemv_driver.cpp.o [ 28%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/simple_concat.cpp.o [ 28%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/simple_layer_normalization.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/simple_resampling.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/simple_sum.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/zero_point_utils.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_gemm_s8u8s32_kern.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_an_kern_autogen.cpp.o /usr/bin/ar: creating lib/libxsmmgen.a [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_at_kern_autogen.cpp.o [ 29%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_bn_kern_autogen.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_bt_kern_autogen.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/f32/gemm_utils_f32.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_sum_an_kern_autogen.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/f32/ref_gemm_f32.cpp.o [ 29%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/gemm.cpp.o [ 30%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_sum_at_kern_autogen.cpp.o [ 30%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_sum_bn_kern_autogen.cpp.o [ 30%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/gemm_pack.cpp.o [ 30%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/s8x8s32/ref_gemm_s8x8s32.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/gemm/s8x8s32/simple_gemm_s8s8s32.cpp.o [ 31%] Built target dnnl_graph_common [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/cpu_matmul_list.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/gemm_bf16_matmul.cpp.o /usr/bin/ar: creating lib/libxsmm.a [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_u8_copy_sum_bt_kern_autogen.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/gemm_f32_matmul.cpp.o /usr/bin/ar: creating lib/libxsmmext.a [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/gemm_x8s8s32x_matmul.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/ref_matmul.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/matmul/ref_matmul_int8.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_an_kern_autogen.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder.cpp.o [ 31%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_at_kern_autogen.cpp.o [ 31%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_bn_kern_autogen.cpp.o [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_comp_bf16_s8.cpp.o [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_comp_f32_s8.cpp.o [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_bt_kern_autogen.cpp.o [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_sum_an_kern_autogen.cpp.o ================================================================================ LIBXSMM master-1.17-2648 (Linux@iZ2ze8vdmdyl66lfybi1hzZ) -------------------------------------------------------------------------------- GNU Compiler Collection: gcc 12.2.1, and g++ 12.2.1 C / C++ target: -mfma -mavx512f -mavx512cd -mavx512dq -mavx512bw -mavx512vl Fortran Compiler is disabled or missing: no Fortran interface is built! -------------------------------------------------------------------------------- Environment: CC CXX -------------------------------------------------------------------------------- [ 32%] No install step for 'libxsmm' [ 32%] Completed 'libxsmm' [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_comp_s8_s8.cpp.o [ 32%] Built target libxsmm [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_bf16.cpp.o [ 32%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f16.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_sum_at_kern_autogen.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_bf16.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_sum_bn_kern_autogen.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx2_vnni_u8_copy_sum_bt_kern_autogen.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_gemm_s8u8s32_kern.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_gemv_s8x8s32.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_kernel_gemv_s8x8s32_kern.cpp.o [ 33%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_f16.cpp.o [ 33%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_f32.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_s32.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_s8.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_f32_u8.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_s32.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_s8.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/reorder/cpu_reorder_regular_u8.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_an_kern_autogen.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/brgemm_cell_common.cpp.o [ 34%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/cell_common.cpp.o [ 35%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/cell_gru.cpp.o [ 35%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_at_kern_autogen.cpp.o [ 35%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/cell_gru_lbr.cpp.o [ 35%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_postgemm_gru.cpp.o [ 35%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_postgemm_gru_lbr.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_bn_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_bt_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_sum_an_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_sum_at_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_sum_bn_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_postgemm_lstm.cpp.o [ 36%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx512_core_u8_copy_sum_bt_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_b0_b_gemm_s8u8s32_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_b0_c_gemm_s8u8s32_kern_autogen.cpp.o [ 36%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_postgemm_lstm_projection.cpp.o [ 37%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_b0_gemm_s8u8s32_kern_autogen.cpp.o [ 37%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_b0_r_gemm_s8u8s32_kern_autogen.cpp.o [ 37%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_postgemm_rnn.cpp.o [ 37%] Built target dnnl_common [ 37%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_b_gemm_s8u8s32_kern_autogen.cpp.o [ 37%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/ref_rnn.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/rnn/rnn_utils.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/jit_utils/jit_utils.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/CMakeFiles/dnnl_cpu.dir/jit_utils/linux_perf/linux_perf.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_c_gemm_s8u8s32_kern_autogen.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_gemm_s8u8s32_kern_autogen.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_kernel_r_gemm_s8u8s32_kern_autogen.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_an_kern_autogen.cpp.o [ 38%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_at_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_bn_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_bt_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_sum_an_kern_autogen.cpp.o [ 39%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_sum_at_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_sum_bn_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_avx_u8_copy_sum_bt_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_b0_b_gemm_s8u8s32_kern_autogen.cpp.o [ 39%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_b0_c_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_b0_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_b0_r_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_b_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_c_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_kernel_r_gemm_s8u8s32_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_an_kern_autogen.cpp.o [ 40%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_at_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_bn_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_bt_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_sum_an_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_sum_at_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_sum_bn_kern_autogen.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm/s8x8s32/jit_sse41_u8_copy_sum_bt_kern_autogen.cpp.o [ 41%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm_bf16_convolution.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/gemm_bf16_inner_product.cpp.o [ 41%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/injectors/injector_utils.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/injectors/jit_uni_binary_injector.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/injectors/jit_uni_eltwise_injector.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/injectors/jit_uni_postops_injector.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/ip_convolution.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx2_1x1_conv_kernel_f32.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx2_1x1_convolution.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx2_conv_kernel_f32.cpp.o [ 42%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx2_convolution.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_common_1x1_conv_kernel.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_common_1x1_convolution.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_common_conv_kernel.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_common_convolution.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_amx_1x1_conv_kernel.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_amx_1x1_convolution.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_amx_conv_kernel.cpp.o [ 43%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_amx_convolution.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_amx_deconvolution.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_1x1_conv_kernel.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_1x1_convolution.cpp.o [ 44%] Building CXX object 
csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_conv_kernel.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_convolution.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_dw_conv_kernel.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_bf16_sum.cpp.o [ 44%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_f32_wino_conv_2x3.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_f32_wino_conv_4x3.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_f32_wino_conv_4x3_kernel.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_fp16cvt.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_resampling.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_x8s8s32x_1x1_conv_kernel.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_x8s8s32x_1x1_convolution.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_x8s8s32x_conv_kernel.cpp.o [ 45%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_x8s8s32x_convolution.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_avx512_core_x8s8s32x_deconvolution.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brdgmm_dw_conv.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_1x1_conv.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_bwd.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_bwd_strided.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_bwd_trans_kernel.cpp.o [ 46%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_bwd_utils.cpp.o [ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_bwd_w.cpp.o In file included from 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/dnnl_thread.hpp:24,
                 from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm_bf16_inner_product.cpp:21:
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:792:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 320 and 0 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 | dst[i] = src[i]; | ~~~~~~~^~~~~~
In function ‘void dnnl::impl::utils::array_copy(T*, const T*, size_t) [with T = long int]’,
    inlined from ‘dnnl::impl::status_t dnnl::impl::memory_desc_init_by_blocking_desc(memory_desc_t&, const blocking_desc_t&)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/type_helpers.hpp:795:22,
    inlined from ‘dnnl::impl::cpu::{anonymous}::transpose_md(dnnl::impl::memory_desc_t&)::’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/cpu_inner_product_pd.hpp:104:42:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/common/utils.hpp:202:16: warning: ‘void* __builtin_memcpy(void*, const void*, long unsigned int)’ accessing 18446744056529682432 or more bytes at offsets 0 and 112 overlaps 9223372002495037441 bytes at offset -9223372019674906625 [-Wrestrict]
  202 | dst[i] = src[i]; | ~~~~~~~^~~~~~
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_comp_pad_kernel.cpp.o
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_trans_kernel.cpp.o
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_conv_utils.cpp.o
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_deconv.cpp.o
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_inner_product.cpp.o
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx512_core_f32_copy_at_kern_part1_autogen.cpp: In member function ‘void dnnl::impl::cpu::x64::jit_avx512_core_f32_copy_at_kern::generate_part1(const Xbyak::Label&, const Xbyak::Label&, const Xbyak::Label&, const Xbyak::Label&)’:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx512_core_f32_copy_at_kern_part1_autogen.cpp:51:6: note: variable tracking size limit exceeded with ‘-fvar-tracking-assignments’, retrying without
  51 | void jit_avx512_core_f32_copy_at_kern::generate_part1(const Xbyak::Label &l4000, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_inner_product_utils.cpp.o
[ 47%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_brgemm_transpose_utils.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_gemm_inner_product_utils.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_gemm_x8s8s32x_conv_zp_src_pad_comp.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_gemm_x8s8s32x_convolution_utils.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_sse41_1x1_conv_kernel_f32.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_sse41_1x1_convolution.cpp.o
[ 48%] Built target dnnl_graph_backend_dnnl
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_sse41_conv_kernel_f32.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_sse41_convolution.cpp.o
[ 48%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_transpose_utils.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_batch_normalization.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_batch_normalization_s8.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_binary.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_binary_kernel.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_convert_xf16.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_deconv_zp_pad_str_kernel.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_dw_conv_kernel_f32.cpp.o
[ 49%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_dw_conv_kernel_utils.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_dw_convolution.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_eltwise.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_eltwise_int.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_i8i8_pooling.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_layer_normalization.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_pooling.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_pool_kernel.cpp.o
[ 50%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_reduction.cpp.o
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx_kernel_sgemm_kern_part2_autogen.cpp: In member function ‘void dnnl::impl::cpu::x64::jit_avx_kernel_sgemm_kern::generate_part2(Xbyak::Label&, Xbyak::Label&, Xbyak::Label&)’:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx_kernel_sgemm_kern_part2_autogen.cpp:26:6: note: variable tracking size limit exceeded with ‘-fvar-tracking-assignments’, retrying without
  26 | void jit_avx_kernel_sgemm_kern::generate_part2( | ^~~~~~~~~~~~~~~~~~~~~~~~~
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_reduction_kernel.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_reorder.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_reorder_utils.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_resampling.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_resampling_kernel.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_softmax.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_tbb_batch_normalization.cpp.o
[ 51%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_x8s8s32x_1x1_conv_kernel.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_x8s8s32x_1x1_convolution.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_x8s8s32x_conv_kernel.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_x8s8s32x_convolution.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/jit_uni_x8s8s32x_deconvolution.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_bwd_base.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_bwd_blocked.cpp.o
[ 52%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_bwd_nhwc.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_fwd_base.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_fwd_blocked.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_avx512_common_lrn_fwd_nhwc.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_uni_lrn.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/lrn/jit_uni_lrn_kernel.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/matmul/brgemm_matmul.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/matmul/brgemm_matmul_copy_utils.cpp.o
[ 53%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/matmul/brgemm_matmul_reorders.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/matmul/brgemm_matmul_utils.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_prelu_backward.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_prelu_base_kernel.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_prelu_forward.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_prelu_reduction_kernel.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_prelu_utils.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_uni_prelu_backward_kernel.cpp.o
[ 54%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/prelu/jit_uni_prelu_forward_kernel.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/brgemm_cell_common_bwd.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/brgemm_cell_common_fwd.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/brgemm_cell_common_reorders.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/brgemm_cell_common_utils.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/jit_brgemm_transpose_single_row.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/jit_diff_weights_peephole.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/jit_gates_reduction.cpp.o
[ 55%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/rnn/rnn_brgemm_utils.cpp.o
[ 56%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/shuffle/jit_uni_shuffle.cpp.o
[ 56%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/shuffle/jit_uni_shuffle_kernel.cpp.o
[ 56%] Building CXX object csrc/cpu/cpu_third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/CMakeFiles/dnnl_cpu_x64.dir/utils/jit_io_helper.cpp.o
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx2_f32_copy_at_kern_autogen.cpp: In member function ‘virtual void dnnl::impl::cpu::x64::jit_avx2_f32_copy_at_kern::generate()’:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx2_f32_copy_at_kern_autogen.cpp:29:6: note: variable tracking size limit exceeded with ‘-fvar-tracking-assignments’, retrying without
  29 | void jit_avx2_f32_copy_at_kern::generate() { | ^~~~~~~~~~~~~~~~~~~~~~~~~
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx512_core_f32_copy_an_kern_autogen.cpp: In member function ‘virtual void dnnl::impl::cpu::x64::jit_avx512_core_f32_copy_an_kern::generate()’:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/third_party/oneDNN/src/cpu/x64/gemm/f32/jit_avx512_core_f32_copy_an_kern_autogen.cpp:29:6: note: variable tracking size limit exceeded with ‘-fvar-tracking-assignments’, retrying without
  29 | void jit_avx512_core_f32_copy_an_kern::generate() { | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ 56%] Built target dnnl_cpu_x64
[ 56%] Built target dnnl_cpu
[ 56%] Linking CXX static library libdnnl.a
[ 56%] Built target dnnl
[ 56%] Linking CXX static library libdnnl_graph.a
[ 56%] Built target dnnl_graph
[ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AVX2.cpp.o
[ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AVX2.cpp.o
[ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AVX2.cpp.o
[ 57%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AVX2.cpp.o [ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AVX2.cpp.o [ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AVX2.cpp.o [ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/dyndisp/DispatchStub.cpp.o [ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AVX2.cpp.o [ 57%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX2.cpp.o [ 58%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX2.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AVX2_VNNI.cpp.o [ 60%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX2.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AVX2.cpp.o [ 60%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AVX2.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AVX2.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AVX2_VNNI.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AVX2_VNNI.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AVX2_VNNI.cpp.o [ 61%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AVX2_VNNI.cpp.o [ 62%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AVX2_VNNI.cpp.o [ 63%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AVX2_VNNI.cpp.o
[ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX2_VNNI.cpp.o
[ 63%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AVX2_VNNI.cpp.o
In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX2.cpp:1:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined
  260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ |
In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8:
/usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition
  229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ |
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined
  277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ |
/usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition
  265 | #define REGISTER_NO_CPU_DISPATCH(name) \ |
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined
  307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ |
/usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition
  291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ |
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX2_VNNI.cpp.o
[ 64%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AVX2_VNNI.cpp.o
[ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AVX2_VNNI.cpp.o
[ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AVX2_VNNI.cpp.o
[ 65%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AVX512.cpp.o [ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AVX512.cpp.o [ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AVX512.cpp.o [ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AVX512.cpp.o [ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AVX512.cpp.o [ 65%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AVX512.cpp.o [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AVX512.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX2.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define 
REGISTER_NO_AVX512_DISPATCH(name) \ | In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX2.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX2_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define 
REGISTER_NO_AVX512_DISPATCH(name) \ | In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX2_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 66%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AVX512.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX2_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | 
/usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512.cpp.o [ 67%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AVX512.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 68%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512.cpp.o [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 68%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AVX512.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AVX512.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location 
of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AVX512.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AVX512_VNNI.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AVX512_VNNI.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AVX512_VNNI.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AVX512_VNNI.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AVX512_VNNI.cpp.o [ 69%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AVX512_VNNI.cpp.o [ 70%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AVX512_VNNI.cpp.o [ 71%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AVX512_VNNI.cpp.o [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512_VNNI.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | In file included from 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 72%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512_VNNI.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AVX512_VNNI.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AVX512_VNNI.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AVX512_VNNI.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AVX512_BF16.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AVX512_BF16.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AVX512_BF16.cpp.o [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AVX512_BF16.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512_VNNI.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from 
/usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 73%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AVX512_BF16.cpp.o [ 74%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AVX512_BF16.cpp.o [ 75%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AVX512_BF16.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AVX512_BF16.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AVX512_BF16.cpp.o [ 76%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512_BF16.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512_BF16.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AVX512_BF16.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AVX512_BF16.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AVX512_BF16.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AVX512_BF16.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 77%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AVX512_BF16.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.AMX.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.AMX.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.AMX.cpp.o [ 77%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.AMX.cpp.o [ 78%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.AMX.cpp.o [ 79%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.AMX.cpp.o [ 80%] Building 
CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.AMX.cpp.o [ 80%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.AMX.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.AMX.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AMX.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.AMX.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of 
the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AMX.cpp.o [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.AMX.cpp.o [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.AMX.cpp.o [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.AMX.cpp.o [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/isa_help_krnl.cpp.DEFAULT.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.AMX.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/utils/csr2csckrnl.cpp.DEFAULT.cpp.o [ 81%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SplitSgdStepKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/SgdFusedStepKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/LambFusedStepKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdamFusedStepKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/optimizer/AdagradFusedStepKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/UpdateBatchKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/TorchVisionNmsKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/SumKrnl.cpp.DEFAULT.cpp.o [ 82%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RnntEmbeddingKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ROIAlignKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/RMSNormKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/PackedMklSgemmKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/NmsKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MultiHeadAttentionKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardSGDKrnl.cpp.DEFAULT.cpp.o [ 83%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/MergedEmbeddingBagBackwardKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InteractionKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/InstanceNormKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/GroupNormKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/EmbeddingBagKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/DivSoftmaxKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CumsumKrnl.cpp.DEFAULT.cpp.o [ 84%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConverterKrnl.cpp.DEFAULT.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:17, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/IndexSelectKrnl.cpp.DEFAULT.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: 
"REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/IndexSelectKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/ConcatBnReluKrnl.cpp.DEFAULT.cpp.o [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.DEFAULT.cpp.o [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.DEFAULT.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/CatKrnl.cpp.DEFAULT.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/CatKrnl.cpp:8: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 85%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSwishKrnl.cpp.DEFAULT.cpp.o [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddSoftmaxKrnl.cpp.DEFAULT.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:14, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa_codegen/cpu/aten/kernels/AveragePoolKrnl.cpp.DEFAULT.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/kernels/AveragePoolKrnl.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:307: warning: "REGISTER_NO_AVX512_DISPATCH" redefined 307 | #define REGISTER_NO_AVX512_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:291: note: this is the location of the previous definition 291 | #define REGISTER_NO_AVX512_DISPATCH(name) \ | [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/__/isa_codegen/cpu/aten/kernels/AddLayerNormKrnl.cpp.DEFAULT.cpp.o [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/utils/fpmath_mode.cpp.o [ 85%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/utils/onednn_utils.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/LlgaTensorImpl.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/defer_size_check.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/fusion_group_name.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/graph_fuser.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/graph_helper.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/guard_shape.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/interface.cpp.o [ 86%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/kernel.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/layout_propagation.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/lift_up_quant.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/prepare_binary.cpp.o [ 87%] Building CXX 
object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/prepare_dequant.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/prepare_silu.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/remove_mutation.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/runtime.cpp.o [ 87%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/codegen/onednn/utils.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/ConvPacked.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/ConvTransposePacked.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/Einsum.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/LinearMKLPacked.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/LinearPacked.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/LinearSwishCustomized.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/Matmul.cpp.o [ 88%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/MaxPool2D.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/Mha.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/OpContext.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/RegisterOpContextClass.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/Shuffle.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/kernels/Softmax.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/external_call/conv_nnc.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/external_call/linear_nnc.cpp.o [ 89%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/external_call/matmul_div.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/nnc_fuser_register.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/nnc_lowering_register.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/operator_schema.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/cpu/tensorexpr/utils.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/fusion_pass.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/initialization.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/concat_linear.cpp.o [ 90%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/frozen_conv_folding.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/frozen_linear_folding.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_conv.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.cpp: In function ‘torch::jit::Operation 
torch_ipex::jit::createLlgaKernel(const torch::jit::Node*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.cpp:122:10: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::createLlgaKernel(const torch::jit::Node*)::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 122 | return [kernel](Stack* stack) { | ^~~~~~~~~~~~~~~~~~~~~~~~ 123 | RECORD_FUNCTION(kernel->profileName(), c10::ArrayRef()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 124 | | 125 | kernel->run(*stack); | ~~~~~~~~~~~~~~~~~~~~ 126 | return 0; | ~~~~~~~~~ 127 | }; | ~ In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h:5, from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h:4, from /usr/lib64/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h:6, from /usr/lib64/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h:7, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.cpp:1: /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.cpp: In function ‘torch::jit::Operation torch_ipex::jit::createLlgaGuardKernel(const torch::jit::Node*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/codegen/onednn/interface.cpp:138:10: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::createLlgaGuardKernel(const torch::jit::Node*)::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
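The two deprecation warnings above (interface.cpp:122 and interface.cpp:138) fire because the LLGA kernels are registered with lambdas of the old `int(Stack*)` shape, while the `torch::jit::Operation` constructor quoted from ATen/core/stack.h now wants a `void(Stack&)` callable. The sketch below only illustrates the shape of that signature change; the operation body and names are hypothetical placeholders, not the IPEX code.

```cpp
// Illustrative sketch of the signature change requested by
// -Wdeprecated-declarations above. Requires libtorch headers; the lambda
// bodies are placeholders, not the actual createLlgaKernel logic.
#include <ATen/core/stack.h>   // declares torch::jit::Operation / Stack

using torch::jit::Operation;
using torch::jit::Stack;

// Deprecated form (what the warning flags): pointer parameter, int return.
// Operation op_old([](Stack* stack) {
//   /* pop inputs from *stack, push outputs */
//   return 0;
// });

// Recommended form per the warning text: "Please use void(Stack&)".
Operation op_new([](Stack& stack) {
  /* pop inputs from stack, push outputs */
});
```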
[-Wdeprecated-declarations] 138 | return [node](Stack* stack) { | ^~~~~~~~~~~~~~~~~~~~~~ 139 | RECORD_FUNCTION( | ~~~~~~~~~~~~~~~~ 140 | fuser::onednn::LlgaGuardName(), c10::ArrayRef()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 141 | | 142 | GRAPH_DEBUG("Guarding node: ", node->kind().toQualString()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 143 | std::vector types = node->tys(attr::types); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 144 | const auto num_inputs = types.size(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 145 | | 146 | GRAPH_DEBUG("num_inputs to guard: ", num_inputs); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 147 | | 148 | for (size_t i = 0; i < num_inputs; i++) { | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 149 | GRAPH_DEBUG("checking input ", i); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 150 | auto& input = peek(stack, i, num_inputs); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 151 | const c10::TensorTypePtr& guard_tensor_type = | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 152 | types[i]->cast(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 153 | | 154 | if (!input.isTensor()) { | ~~~~~~~~~~~~~~~~~~~~~~~~ 155 | GRAPH_DEBUG("input ", i, " is not a tensor, return false"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 156 | push(stack, IValue(false)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 157 | return; | ~~~~~~~ 158 | } | ~ 159 | const at::Tensor& tensor = input.toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 160 | | 161 | // If input tensor is of mkldnn, it's originated from an upstream | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 162 | // LLGA partition that has passed the check on input shapes. | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 163 | // It is valid to continue here as long as the output shapes from | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 164 | // oneDNN graph partitions are determined by the input shapes. 
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 165 | if (tensor.is_mkldnn()) { | ~~~~~~~~~~~~~~~~~~~~~~~~~ 166 | GRAPH_DEBUG("input ", i, " is_mkldnn, continue"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 167 | continue; | ~~~~~~~~~ 168 | } | ~ 169 | | 170 | if (!guard_tensor_type->matchTensor(tensor)) { | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 171 | GRAPH_DEBUG("input ", i, " check failed, return false"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 172 | push(stack, IValue(false)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | return; | ~~~~~~~ 174 | } | ~ 175 | } | ~ 176 | | 177 | // TODO: check type and return the right flag | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | // naively return true; | ~~~~~~~~~~~~~~~~~~~~~~~ 179 | GRAPH_DEBUG("all check done, return true"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | push(stack, IValue(true)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 181 | return; | ~~~~~~~ 182 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_conv_transpose.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_einsum.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_helper.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_inplace_replace.cpp.o [ 91%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_linear.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/graph_rewrite_mha.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/prepack_folding.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/register_dnnl_jit_ops.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/remove_redundant_aliases.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/passes/utils.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/__/jit/register_interface.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/isa/cpu_feature.cpp.o [ 92%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/isa/cpu_feature_main.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/ideep/IDeepConversions.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/ideep/IDeepRegistration.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/autocast/autocast_kernels.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/autocast/autocast_mode.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/AddLayerNorm.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/AddSoftmax.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/AddSwish.cpp.o [ 93%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/AveragePool.cpp.o [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/BatchNorm.cpp.o [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/ConcatBnRelu.cpp.o [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Conv.cpp.o In file 
included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.cpp:11: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Pool.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/AveragePool.cpp:5: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/ConvTranspose.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_conv_transpose.cpp:3: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Converter.cpp.o [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Cumsum.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/frozen_linear_folding.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/fusion_pass.cpp:7: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 94%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/DivSoftmax.cpp.o [ 94%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Eltwise.cpp.o [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/EmbeddingBag.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_linear.cpp:6: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_linear.cpp: In function ‘void torch_ipex::jit::graph_rewrite::insertPrePackedLinearOp(torch::jit::Block*, std::unordered_set&, const bool&)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_linear.cpp:131:73: warning: suggest parentheses around ‘&&’ within ‘||’ [-Wparentheses] 130 | if (!(weight_dtype_option.has_value() && | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 131 | (weight_dtype_option.value() == at::ScalarType::BFloat16) && | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~ 132 | ideep::has_bf16_type_support() || | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/GroupNorm.cpp.o [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/InstanceNorm.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_einsum.cpp:3: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Interaction.cpp.o [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/LayerNorm.cpp.o [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Linear.cpp.o [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/LinearMKL.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_conv.cpp:5: 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 95%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Matmul.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/frozen_linear_folding.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/frozen_linear_folding.cpp:15: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Mean.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_mha.cpp:3: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:11:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 11 | inline auto accumu_use_check = [](const torch::jit::Node* add_node, | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:29:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 29 | inline auto fuse_add_filter_accumu_on_the_right = | ^~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/graph_rewrite_utils.h:70:1: warning: inline variables are only available with ‘-std=c++17’ or ‘-std=gnu++17’ [-Wc++17-extensions] 70 | inline auto fuse_add_filter_accumu_on_the_left = | ^~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/MergedEmbeddingBag.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to 
register operator instead. [-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:185:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 185 | CreateConvUnaryPostOpPrepack(relu), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h:5, from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h:11, from /usr/lib64/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:3: /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
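The repeated -Wdeprecated-declarations warnings all come from the same pattern: the CreateConvUnaryPostOpPrepack macro builds its torch::jit::Operation from a lambda with the legacy int(Stack*) signature, while the note from ATen/core/stack.h asks for the void(Stack&) form. A minimal sketch of that signature change follows; the operator body is a placeholder (the real macro calls IpexConvolutionOpContext::create_context with eight stack slots), so only the shape of the lambda is meant to carry over.

#include <ATen/core/stack.h>

using torch::jit::Stack;

// Legacy form flagged by the warning: takes Stack*, returns int.
auto legacy_op = [](Stack* stack) {
  at::Tensor input = torch::jit::peek(stack, 0, 1).toTensor();  // placeholder unpacking
  torch::jit::drop(stack, 1);
  torch::jit::pack(stack, input.relu());                        // placeholder computation
  return 0;
};

// Recommended form from the note: takes Stack&, returns void.
auto updated_op = [](Stack& stack) {
  at::Tensor input = torch::jit::peek(stack, 0, 1).toTensor();  // placeholder unpacking
  torch::jit::drop(stack, 1);
  torch::jit::pack(stack, input.relu());                        // placeholder computation
};

In the void(Stack&) form the result is still communicated through the stack, so the trailing 'return 0;' simply goes away.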
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:186:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 186 | CreateConvUnaryPostOpPrepack(sigmoid), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:187:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 187 | CreateConvUnaryPostOpPrepack(swish), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:188:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 188 | CreateConvUnaryPostOpPrepack(tanh), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/MergedEmbeddingBagBackward.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:189:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 189 | CreateConvUnaryPostOpPrepack(mish), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:190:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 190 | CreateConvUnaryPostOpPrepack(abs), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:191:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 191 | CreateConvUnaryPostOpPrepack(exp), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:192:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 192 | CreateConvUnaryPostOpPrepack(hardswish), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:193:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 193 | CreateConvUnaryPostOpPrepack(square), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:194:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 194 | CreateConvUnaryPostOpPrepack(log), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:195:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 195 | CreateConvUnaryPostOpPrepack(round), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:196:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 196 | CreateConvUnaryPostOpPrepack(sqrt), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:55:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 55 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 56 | auto result = IpexConvolutionOpContext::create_context( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 57 | std::move((std::move(peek(stack, 0, 8))).toTensor()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 58 | std::move( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | (std::move(peek(stack, 1, 8))).toOptional()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 60 | std::move((std::move(peek(stack, 2, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | std::move((std::move(peek(stack, 3, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | std::move((std::move(peek(stack, 4, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 63 | (std::move(peek(stack, 5, 8))).toInt(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | (std::move(peek(stack, 6, 8))).toBool(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 65 | std::move((std::move(peek(stack, 7, 8))).toIntVector()), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 66 | ideep::attr_t::fuse_##FUSED_OP().set_fpmath_mode( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | torch_ipex::fpmath_mode)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | drop(stack, 8); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 70 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:197:5: note: in expansion of macro ‘CreateConvUnaryPostOpPrepack’ 197 | CreateConvUnaryPostOpPrepack(hardsigmoid), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
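The same deprecation is reported again for the CreateConvUnaryPostOpRun macro at register_dnnl_jit_ops.cpp:82 (and, further down in this log, for CreateConvBinaryPostOpPrepack at line 100): each run wrapper pops a tensor plus a packed op context off the stack through an int(Stack*) lambda. Sketched below is what that two-slot wrapper could look like in the void(Stack&) style; the fused-convolution call is a stub because the real entry point and the custom-class template argument are not fully visible in this log.

#include <ATen/core/stack.h>

// Stub standing in for the fused convolution entry point (placeholder only).
static at::Tensor convolution_fused_run_stub(
    const at::Tensor& input,
    const c10::IValue& /*op_context*/) {
  return input.relu();
}

// Two-slot run wrapper in the recommended void(Stack&) form.
auto fused_run_op = [](torch::jit::Stack& stack) {
  auto input = torch::jit::peek(stack, 0, 2).toTensor();
  auto op_context = torch::jit::peek(stack, 1, 2);  // custom-class IValue; type elided in the log
  auto result = convolution_fused_run_stub(input, op_context);
  torch::jit::drop(stack, 2);
  torch::jit::pack(stack, std::move(result));
};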
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:199:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 199 | CreateConvUnaryPostOpRun(run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:200:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 200 | CreateConvUnaryPostOpRun(relu_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:201:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 201 | CreateConvUnaryPostOpRun(sigmoid_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:202:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 202 | CreateConvUnaryPostOpRun(swish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:203:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 203 | CreateConvUnaryPostOpRun(tanh_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:204:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 204 | CreateConvUnaryPostOpRun(mish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:205:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 205 | CreateConvUnaryPostOpRun(abs_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:206:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 206 | CreateConvUnaryPostOpRun(exp_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:207:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 207 | CreateConvUnaryPostOpRun(hardswish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:208:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 208 | CreateConvUnaryPostOpRun(square_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:209:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 209 | CreateConvUnaryPostOpRun(log_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:210:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 210 | CreateConvUnaryPostOpRun(round_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:211:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 211 | CreateConvUnaryPostOpRun(sqrt_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:82:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 82 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 83 | auto result = convolution_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 85 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 86 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 89 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 90 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:212:5: note: in expansion of macro ‘CreateConvUnaryPostOpRun’ 212 | CreateConvUnaryPostOpRun(hardsigmoid_run), | ^~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:100:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:100:16: warning: ‘torch::jit::Operation::Operation(F&&)’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations]
The echoed macro body (lines 100-119, CreateConvBinaryPostOpPrepack) is again an int(Stack*) lambda: it peeks nine stack slots, reads an optional alpha1 scale from slot 8 (defaulting to 1.0), builds IpexConvolutionOpContext::create_context(...) with ideep::attr_t::ATTR(scale).set_fpmath_mode(torch_ipex::fpmath_mode), then drop(stack, 9), torch::jit::pack(stack, std::move(result)) and return 0. The warning is reported once per expansion:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:214:5: note: in expansion of macro ‘CreateConvBinaryPostOpPrepack’
  214 |     CreateConvBinaryPostOpPrepack(add, fuse_sum),
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:215:5: note: in expansion of macro ‘CreateConvBinaryPostOpPrepack’
  215 |     CreateConvBinaryPostOpPrepack(add_relu, residual),
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:216:5: note: in expansion of macro ‘CreateConvBinaryPostOpPrepack’
  216 |     CreateConvBinaryPostOpPrepack(swish_add, fuse_swish_sum),
The same diagnostic then repeats for register_dnnl_jit_ops.cpp:131:16 (macro CreateConvBinaryPostOpRun, an int(Stack*) lambda that calls convolution_##FUSED_OP on four stack slots and packs the result), expanded at line 217 (add_run), line 218 (add_relu_run) and line 219 (swish_add_run). Every instance carries the same note:
/usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here
   25 |   Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) {
The remaining convolution post-op registrations in register_dnnl_jit_ops.cpp trigger the same ‘torch::jit::Operation::Operation(F&&)’ deprecation warning (Please use void(Stack&) to register operator instead [-Wdeprecated-declarations]), one per lambda; only the echoed lambda body differs:
  226:18  IpexConvolutionOpContext::create_context over 10 stack slots with ideep::attr_t::fuse_clamp(lower_bound_value, upper_bound_value)
  254:18  IpexConvolutionOpContext::create_context over 11 stack slots with ideep::attr_t::fuse_elu(scale_value, alpha_value, input_scale_value)
  285:18  IpexConvolutionOpContext::create_context over 9 stack slots with ideep::attr_t::fuse_relu(1.0, alpha_value)
  311:18  IpexConvolutionOpContext::create_context over 9 stack slots with ideep::attr_t::fuse_pow(1.0, 1.0, exponent_value)
  337:18  IpexConvolutionOpContext::create_context over 9 stack slots with ideep::attr_t::fuse_gelu(1.f, 0.f, 0.f, gelu_type), gelu_type picked from the ‘approximate’ argument (dnnl::algorithm::eltwise_gelu_erf for "none", otherwise eltwise_gelu_tanh)
  369:18  convolution_hardtanh_run over 4 stack slots
  388:18  convolution_elu_run over 5 stack slots
  407:18  convolution_leaky_relu_run over 3 stack slots
  424:18  convolution_pow_run over 3 stack slots
  444:18  convolution_bottleneck_run over 4 stack slots
  468:18  convolution_bottleneck_run over 5 stack slots
  490:18  convolution_gelu_run over 3 stack slots
Each of these is an int(Stack*) lambda that applies .set_fpmath_mode(torch_ipex::fpmath_mode) where an ideep attribute is built, then drop(stack, N), torch::jit::pack(stack, std::move(result)) and return 0, and each warning points at /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here.
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function:
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:153:16: warning: ‘torch::jit::Operation::Operation(F&&)’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations]
  153 |   return [](Stack* stack) {                                \
  154 |     auto result = linear_##FUSED_OP(                       \
  155 |         (std::move(peek(stack, 0, 2))).toTensor(),         \
  156 |         (std::move(peek(stack, 1, 2)))                     \
  157 |             .toCustomClass());                              \
  158 |     drop(stack, 2);                                        \
  159 |     torch::jit::pack(stack, std::move(result));            \
  160 |     return 0;                                              \
  161 |   };                                                       \
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:503:5: note: in expansion of macro ‘CreateLinearUnaryPostOpRun’
  503 |     CreateLinearUnaryPostOpRun(run),
      |     ^~~~~~~~~~~~~~~~~~~~~~~~~~
[ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/MergedEmbeddingBagBackwardSGD.cpp.o
/usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here
   25 |   Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) {
      |   ^~~~~~~~~
The identical warning at register_dnnl_jit_ops.cpp:153:16 is emitted for every other CreateLinearUnaryPostOpRun expansion: line 504 (relu_run), 505 (sigmoid_run), 506 (swish_run), 507 (tanh_run), 508 (mish_run), 509 (abs_run), 510 (exp_run), 511 (hardswish_run), 512 (square_run), 513 (log_run), 514 (round_run), 515 (sqrt_run) and 516 (hardsigmoid_run).
It is then raised once more at register_dnnl_jit_ops.cpp:523:18 for the int(Stack*) lambda that calls linear_leaky_relu_run on 3 stack slots (drop(stack, 3), torch::jit::pack(stack, std::move(result)), return 0), again with the note at /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3.
[-Wdeprecated-declarations] 541 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 542 | auto result = linear_hardtanh_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 543 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 544 | (std::move(peek(stack, 1, 4))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 545 | (std::move(peek(stack, 2, 4))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 546 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 547 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 548 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 549 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 550 | return 0; | ~~~~~~~~~ 551 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:560:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 560 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 561 | auto result = linear_elu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 562 | (std::move(peek(stack, 0, 5))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 563 | (std::move(peek(stack, 1, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 564 | (std::move(peek(stack, 2, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 565 | (std::move(peek(stack, 3, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 566 | (std::move(peek(stack, 4, 5))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 567 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 568 | drop(stack, 5); | ~~~~~~~~~~~~~~~ 569 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 570 | return 0; | ~~~~~~~~~ 571 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:579:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 579 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 580 | auto result = linear_pow_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 581 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 582 | (std::move(peek(stack, 1, 3))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 583 | (std::move(peek(stack, 2, 3))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 584 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 585 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 586 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 587 | return 0; | ~~~~~~~~~ 588 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/MultiHeadAttention.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:596:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 596 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 597 | auto result = linear_gelu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 598 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 599 | (std::move(peek(stack, 1, 3))).toStringView(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 600 | (std::move(peek(stack, 2, 3))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 601 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 602 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 603 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 604 | return 0; | ~~~~~~~~~ 605 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:614:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 614 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 615 | auto output = (std::move(peek(stack, 1, 4))).toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 616 | auto result = linear_add_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 617 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 618 | output, | ~~~~~~~ 619 | (std::move(peek(stack, 2, 4))).toOptional(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 620 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 621 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 622 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 623 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 624 | return 0; | ~~~~~~~~~ 625 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:634:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 634 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 635 | auto output = (std::move(peek(stack, 1, 4))).toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 636 | auto result = linear_add_relu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 637 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 638 | output, | ~~~~~~~ 639 | (std::move(peek(stack, 2, 4))).toOptional(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 640 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 641 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 642 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 643 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 644 | return 0; | ~~~~~~~~~ 645 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:653:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 653 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 654 | auto result = mkl_sgemm_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 655 | (std::move(peek(stack, 0, 2))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 656 | (std::move(peek(stack, 1, 2))).toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 657 | drop(stack, 2); | ~~~~~~~~~~~~~~~ 658 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 659 | return 0; | ~~~~~~~~~ 660 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:665:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 665 | CreateConvTransposeUnaryPostOpRun(run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:666:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 666 | CreateConvTransposeUnaryPostOpRun(relu_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:667:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 667 | CreateConvTransposeUnaryPostOpRun(sigmoid_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:668:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 668 | CreateConvTransposeUnaryPostOpRun(swish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:669:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 669 | CreateConvTransposeUnaryPostOpRun(tanh_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:670:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 670 | CreateConvTransposeUnaryPostOpRun(mish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:671:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 671 | CreateConvTransposeUnaryPostOpRun(abs_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:672:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 672 | CreateConvTransposeUnaryPostOpRun(exp_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:673:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 673 | CreateConvTransposeUnaryPostOpRun(hardswish_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:674:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 674 | CreateConvTransposeUnaryPostOpRun(square_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:675:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 675 | CreateConvTransposeUnaryPostOpRun(log_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:676:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 676 | CreateConvTransposeUnaryPostOpRun(round_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:677:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 677 | CreateConvTransposeUnaryPostOpRun(sqrt_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:172:16: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 172 | return [](Stack* stack) { \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 173 | auto result = conv_transpose_##FUSED_OP( \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 174 | (std::move(peek(stack, 0, 2))).toTensor(), \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 175 | (std::move(peek(stack, 1, 2))) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 176 | .toCustomClass()); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 177 | drop(stack, 2); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 178 | torch::jit::pack(stack, std::move(result)); \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 179 | return 0; \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 180 | }; \ | ~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:678:5: note: in expansion of macro ‘CreateConvTransposeUnaryPostOpRun’ 678 | CreateConvTransposeUnaryPostOpRun(hardsigmoid_run), | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:685:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 685 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 686 | auto result = conv_transpose_gelu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 687 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 688 | (std::move(peek(stack, 1, 3))).toStringView(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 689 | (std::move(peek(stack, 2, 3))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 690 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 691 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 692 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 693 | return 0; | ~~~~~~~~~ 694 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:702:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 702 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 703 | auto result = conv_transpose_leaky_relu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 704 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 705 | (std::move(peek(stack, 1, 3))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 706 | (std::move(peek(stack, 2, 3))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 707 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 708 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 709 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 710 | return 0; | ~~~~~~~~~ 711 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:720:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 720 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 721 | auto result = conv_transpose_hardtanh_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 722 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 723 | (std::move(peek(stack, 1, 4))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 724 | (std::move(peek(stack, 2, 4))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 725 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 726 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 727 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 728 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 729 | return 0; | ~~~~~~~~~ 730 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:739:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 739 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 740 | auto result = conv_transpose_elu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 741 | (std::move(peek(stack, 0, 5))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 742 | (std::move(peek(stack, 1, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 743 | (std::move(peek(stack, 2, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 744 | (std::move(peek(stack, 3, 5))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 745 | (std::move(peek(stack, 4, 5))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 746 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 747 | drop(stack, 5); | ~~~~~~~~~~~~~~~ 748 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 749 | return 0; | ~~~~~~~~~ 750 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:758:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 758 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 759 | auto result = conv_transpose_pow_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 760 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 761 | (std::move(peek(stack, 1, 3))).toScalar(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 762 | (std::move(peek(stack, 2, 3))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 763 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 764 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 765 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 766 | return 0; | ~~~~~~~~~ 767 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:777:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 777 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 778 | auto output = (std::move(peek(stack, 1, 4))).toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 779 | auto result = conv_transpose_add_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 780 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 781 | output, | ~~~~~~~ 782 | (std::move(peek(stack, 2, 4))).toOptional(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 783 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 784 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 785 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 786 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 787 | return 0; | ~~~~~~~~~ 788 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:797:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 797 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 798 | auto output = (std::move(peek(stack, 1, 4))).toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 799 | auto result = conv_transpose_add_relu_run( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 800 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 801 | output, | ~~~~~~~ 802 | (std::move(peek(stack, 2, 4))).toOptional(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 803 | (std::move(peek(stack, 3, 4))) | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 804 | .toCustomClass()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 805 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 806 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 807 | return 0; | ~~~~~~~~~ 808 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:815:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 815 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 816 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 817 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 818 | (std::move(peek(stack, 1, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 819 | toOptionalTensor(std::move(peek(stack, 2, 4))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 820 | (std::move(peek(stack, 3, 4))).toTensor()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 821 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 822 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 823 | return 0; | ~~~~~~~~~ 824 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:832:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 832 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 833 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 834 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 835 | (std::move(peek(stack, 1, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 836 | toOptionalTensor(std::move(peek(stack, 2, 4))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 837 | (std::move(peek(stack, 3, 4))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 838 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 839 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 840 | return 0; | ~~~~~~~~~ 841 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:849:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
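Several of the dil_matmul_div lambdas above route their optional accumulation tensor through a toOptionalTensor(...) helper before calling the kernel. A short sketch of the unpacking such a helper has to perform follows; the helper name is an assumption, and only the None-versus-Tensor distinction taken off the JIT stack is the point.

// Sketch: a None IValue becomes an empty optional, anything else is read as a Tensor.
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

c10::optional<at::Tensor> to_optional_tensor(const c10::IValue& v) {  // hypothetical helper
  if (v.isNone()) {
    return c10::nullopt;   // absent Tensor? argument
  }
  return v.toTensor();     // present argument, copied out as a Tensor
}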
[-Wdeprecated-declarations] 849 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 850 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 851 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 852 | (std::move(peek(stack, 1, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 853 | at::Tensor(), | ~~~~~~~~~~~~~ 854 | (std::move(peek(stack, 2, 3))).toTensor()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 855 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 856 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 857 | return 0; | ~~~~~~~~~ 858 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:866:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 866 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 867 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 868 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 869 | (std::move(peek(stack, 1, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 870 | at::Tensor(), | ~~~~~~~~~~~~~ 871 | (std::move(peek(stack, 2, 3))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 872 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 873 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 874 | return 0; | ~~~~~~~~~ 875 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:883:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 883 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 884 | auto mul_tensor = std::move(peek(stack, 3, 4).toTensor()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 885 | auto mul_input_data = mul_tensor.item(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 886 | // divide mul_input to reuse dil_matmul_div function | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 887 | auto div_input_data = 1.0f / mul_input_data.to(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 888 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 889 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 890 | (std::move(peek(stack, 1, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 891 | toOptionalTensor(std::move(peek(stack, 2, 4))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 892 | div_input_data); | ~~~~~~~~~~~~~~~~ 893 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 894 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 895 | return 0; | ~~~~~~~~~ 896 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:904:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 904 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 905 | // divide mul_input to reuse dil_matmul_div function | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 906 | auto div_input_data = | ~~~~~~~~~~~~~~~~~~~~~ 907 | 1.0f / (std::move(peek(stack, 3, 4))).toScalar().to(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 908 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 909 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 910 | (std::move(peek(stack, 1, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 911 | toOptionalTensor(std::move(peek(stack, 2, 4))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 912 | div_input_data); | ~~~~~~~~~~~~~~~~ 913 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 914 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 915 | return 0; | ~~~~~~~~~ 916 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:924:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 924 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 925 | auto mul_tensor = (std::move(peek(stack, 2, 3))).toTensor(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 926 | auto mul_input_data = mul_tensor.item(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 927 | // divide mul_input to reuse dil_matmul_div function | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 928 | auto div_input_data = 1.0f / mul_input_data.to(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 929 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 930 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 931 | (std::move(peek(stack, 1, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 932 | at::Tensor(), | ~~~~~~~~~~~~~ 933 | div_input_data); | ~~~~~~~~~~~~~~~~ 934 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 935 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 936 | return 0; | ~~~~~~~~~ 937 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:945:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 945 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 946 | // divide mul_input to reuse dil_matmul_div function | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 947 | auto div_input_data = | ~~~~~~~~~~~~~~~~~~~~~ 948 | 1.0f / (std::move(peek(stack, 2, 3))).toScalar().to(); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 949 | auto result = dil_matmul_div( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 950 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 951 | (std::move(peek(stack, 1, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 952 | at::Tensor(), | ~~~~~~~~~~~~~ 953 | div_input_data); | ~~~~~~~~~~~~~~~~ 954 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 955 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 956 | return 0; | ~~~~~~~~~ 957 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:965:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
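The lambdas above that fuse a trailing multiply carry the source comment "divide mul_input to reuse dil_matmul_div function": rather than adding a separate multiply kernel, they hand the reciprocal of the scale to the existing divide path. A reference-level sketch of that identity follows; the function names are stand-ins, not the IPEX kernels.

// x * s == x / (1/s): the multiply fusion is lowered onto the divide kernel
// by passing the reciprocal of the scalar.
#include <ATen/ATen.h>

// Stand-in for the existing matmul+div kernel.
at::Tensor matmul_div_ref(const at::Tensor& a, const at::Tensor& b, double divisor) {
  return at::matmul(a, b) / divisor;
}

// Multiply-by-scalar fusion expressed through the divide path.
at::Tensor matmul_mul_via_div(const at::Tensor& a, const at::Tensor& b, double scale) {
  return matmul_div_ref(a, b, 1.0 / scale);
}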
[-Wdeprecated-declarations] 965 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 966 | auto result = dil_bmm_add( | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 967 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 968 | (std::move(peek(stack, 1, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 969 | (std::move(peek(stack, 2, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 970 | (std::move(peek(stack, 3, 4))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 971 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 972 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 973 | return 0; | ~~~~~~~~~ 974 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Nms.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:981:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 981 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 982 | auto result = dil_matmul( | ~~~~~~~~~~~~~~~~~~~~~~~~~ 983 | (std::move(peek(stack, 0, 2))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 984 | (std::move(peek(stack, 1, 2))).toTensor()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 985 | drop(stack, 2); | ~~~~~~~~~~~~~~~ 986 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 987 | return 0; | ~~~~~~~~~ 988 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:995:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 995 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 996 | auto result = dil_mha_matmul_trans( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 997 | (std::move(peek(stack, 0, 2))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 998 | (std::move(peek(stack, 1, 2))).toTensor()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 999 | drop(stack, 2); | ~~~~~~~~~~~~~~~ 1000 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1001 | return 0; | ~~~~~~~~~ 1002 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1286:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1286 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1287 | auto result = dil_softmax( | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1288 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1289 | (std::move(peek(stack, 1, 3))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1290 | (std::move(peek(stack, 2, 3)))); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1291 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 1292 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1293 | return 0; | ~~~~~~~~~ 1294 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1316:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1316 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1317 | auto result = at::batch_norm( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1318 | (std::move(peek(stack, 0, 9))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1319 | toOptionalTensor(std::move(peek(stack, 1, 9))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1320 | toOptionalTensor(std::move(peek(stack, 2, 9))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1321 | toOptionalTensor(std::move(peek(stack, 3, 9))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1322 | toOptionalTensor(std::move(peek(stack, 4, 9))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1323 | (std::move(peek(stack, 5, 9))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1324 | (std::move(peek(stack, 6, 9))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1325 | (std::move(peek(stack, 7, 9))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1326 | (std::move(peek(stack, 8, 9))).toBool()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1327 | drop(stack, 9); | ~~~~~~~~~~~~~~~ 1328 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1329 | return 0; | ~~~~~~~~~ 1330 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1339:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1339 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1340 | auto result = dil_qembeddingbag( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1341 | (std::move(peek(stack, 0, 8))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1342 | (std::move(peek(stack, 1, 8))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1343 | (std::move(peek(stack, 2, 8))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1344 | (std::move(peek(stack, 3, 8))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1345 | (std::move(peek(stack, 4, 8))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1346 | (std::move(peek(stack, 5, 8))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1347 | (std::move(peek(stack, 6, 8))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1348 | (std::move(peek(stack, 7, 8))).toScalarType()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1349 | drop(stack, 8); | ~~~~~~~~~~~~~~~ 1350 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1351 | return 0; | ~~~~~~~~~ 1352 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1360:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1360 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1361 | auto result = dil_qinteraction( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1362 | (std::move(peek(stack, 0, 4))).toTensorVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1363 | (std::move(peek(stack, 1, 4))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1364 | (std::move(peek(stack, 2, 4))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1365 | (std::move(peek(stack, 3, 4))).toScalarType()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1366 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 1367 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1368 | return 0; | ~~~~~~~~~ 1369 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1376:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1376 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1377 | auto result = quantized_lstm( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1378 | (std::move(peek(stack, 0, 12))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1379 | (std::move(peek(stack, 1, 12))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1380 | (std::move(peek(stack, 2, 12))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1381 | (std::move(peek(stack, 3, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1382 | (std::move(peek(stack, 4, 12))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1383 | (std::move(peek(stack, 5, 12))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1384 | (std::move(peek(stack, 6, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1385 | (std::move(peek(stack, 7, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1386 | (std::move(peek(stack, 8, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1387 | (std::move(peek(stack, 9, 12))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1388 | (std::move(peek(stack, 10, 12))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1389 | (std::move(peek(stack, 11, 12))).toInt()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1390 | drop(stack, 12); | ~~~~~~~~~~~~~~~~ 1391 | | 1392 | torch::jit::pack(stack, std::move(std::get<0>(result))); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1393 | torch::jit::pack(stack, std::move(std::get<1>(result))); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1394 | torch::jit::pack(stack, std::move(std::get<2>(result))); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1395 | return 0; | ~~~~~~~~~ 1396 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1407:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1407 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1408 | auto result = dil_shuffle( | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1409 | (std::move(peek(stack, 0, 4))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1410 | (std::move(peek(stack, 1, 4))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1411 | (std::move(peek(stack, 2, 4))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1412 | (std::move(peek(stack, 3, 4))).toInt()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1413 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 1414 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1415 | return 0; | ~~~~~~~~~ 1416 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1423:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1423 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1424 | auto result = dil_RMSNorm( | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1425 | (std::move(peek(stack, 0, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1426 | (std::move(peek(stack, 1, 3))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1427 | (std::move(peek(stack, 2, 3))).toDouble()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1428 | drop(stack, 3); | ~~~~~~~~~~~~~~~ 1429 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1430 | return 0; | ~~~~~~~~~ 1431 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1440:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1440 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1441 | auto result = dil_add_layernorm( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1442 | (std::move(peek(stack, 0, 8))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1443 | (std::move(peek(stack, 1, 8))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1444 | (std::move(peek(stack, 2, 8))).toInt(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1445 | (std::move(peek(stack, 3, 8))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1446 | toOptionalTensor(std::move(peek(stack, 4, 8))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1447 | toOptionalTensor(std::move(peek(stack, 5, 8))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1448 | (std::move(peek(stack, 6, 8))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1449 | (std::move(peek(stack, 7, 8))).toBool()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1450 | drop(stack, 8); | ~~~~~~~~~~~~~~~ 1451 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1452 | return 0; | ~~~~~~~~~ 1453 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1461:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1461 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1462 | auto result = ConcatBnRelu( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1463 | (std::move(peek(stack, 0, 12))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1464 | (std::move(peek(stack, 1, 12))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1465 | (std::move(peek(stack, 2, 12))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1466 | toOptionalTensor(std::move(peek(stack, 3, 12))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1467 | toOptionalTensor(std::move(peek(stack, 4, 12))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1468 | toOptionalTensor(std::move(peek(stack, 5, 12))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1469 | toOptionalTensor(std::move(peek(stack, 6, 12))), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1470 | (std::move(peek(stack, 7, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1471 | (std::move(peek(stack, 8, 12))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1472 | (std::move(peek(stack, 9, 12))).toDouble(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1473 | (std::move(peek(stack, 10, 12))).toBool(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1474 | (std::move(peek(stack, 11, 12))).toInt()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1475 | drop(stack, 12); | ~~~~~~~~~~~~~~~~ 1476 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1477 | return 0; | ~~~~~~~~~ 1478 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: 
In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1484:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1484 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1485 | auto other_arg = std::move(peek(stack, 2, 4)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1486 | if (other_arg.isDouble()) { | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1487 | auto other_arg_st = at::ones(1).fill_(other_arg.toDouble()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1488 | auto result = einsum_binary( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1489 | (std::move(peek(stack, 0, 4))).toStringView(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1490 | (std::move(peek(stack, 1, 4))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1491 | other_arg_st, | ~~~~~~~~~~~~~ 1492 | (std::move(peek(stack, 3, 4))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1493 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 1494 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1495 | } else { | ~~~~~~~~ 1496 | auto result = einsum_binary( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1497 | (std::move(peek(stack, 0, 4))).toStringView(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1498 | (std::move(peek(stack, 1, 4))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1499 | other_arg.toTensor(), | ~~~~~~~~~~~~~~~~~~~~~ 1500 | (std::move(peek(stack, 3, 4))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1501 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 1502 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1503 | } | ~ 1504 | return 0; | ~~~~~~~~~ 1505 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1511:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. 
[-Wdeprecated-declarations] 1511 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1512 | auto other_arg = std::move(peek(stack, 2, 4)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1513 | auto other_arg_st = at::ones(1).fill_(other_arg.toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1514 | auto result = einsum_binary( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1515 | (std::move(peek(stack, 0, 4))).toStringView(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1516 | (std::move(peek(stack, 1, 4))).toTensorList(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1517 | other_arg_st, | ~~~~~~~~~~~~~ 1518 | (std::move(peek(stack, 3, 4))).toScalar()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1519 | | 1520 | drop(stack, 4); | ~~~~~~~~~~~~~~~ 1521 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1522 | return 0; | ~~~~~~~~~ 1523 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp: In lambda function: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/jit/passes/register_dnnl_jit_ops.cpp:1530:18: warning: ‘torch::jit::Operation::Operation(F&&) [with F = torch_ipex::jit::::; typename std::enable_if*)>, F&&>::value, int>::type = 0]’ is deprecated: Please use void(Stack&) to register operator instead. [-Wdeprecated-declarations] 1530 | return [](Stack* stack) { | ^~~~~~~~~~~~~~~~~~ 1531 | auto result = dil_max_pool2d( | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1532 | (std::move(peek(stack, 0, 6))).toTensor(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1533 | (std::move(peek(stack, 1, 6))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1534 | (std::move(peek(stack, 2, 6))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1535 | (std::move(peek(stack, 3, 6))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1536 | (std::move(peek(stack, 4, 6))).toIntVector(), | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1537 | (std::move(peek(stack, 5, 6))).toBool()); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1538 | drop(stack, 6); | ~~~~~~~~~~~~~~~ 1539 | torch::jit::pack(stack, std::move(result)); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1540 | return 0; | ~~~~~~~~~ 1541 | }; | ~ /usr/lib64/python3.10/site-packages/torch/include/ATen/core/stack.h:25:3: note: declared here 25 | Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { | ^~~~~~~~~ [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/RMSNorm.cpp.o [ 96%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/RNN.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/ROIAlign.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/RnntEmbedding.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/Sum.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/TensorAdvancedIndexing.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/TensorShape.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/TorchVisionNms.cpp.o [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/UpdateBatch.cpp.o In file included from 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.cpp:24: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/BinaryOps.h:4, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.cpp:7: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | [ 97%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/WeightPack.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/optimizer/AdagradFusedStep.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/optimizer/LambFusedStep.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/optimizer/AdamFusedStep.cpp.o In file included from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorAdvancedIndexing.h:5, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.cpp:34: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:260: warning: "REGISTER_ARCH_DISPATCH" redefined 260 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | In file included from /usr/lib64/python3.10/site-packages/torch/include/ATen/native/Copy.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/TensorShape.cpp:12: /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:229: note: this is the location of the previous definition 229 | #define REGISTER_ARCH_DISPATCH(name, arch, fn) \ | /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/dyndisp/DispatchStub.h:277: warning: "REGISTER_NO_CPU_DISPATCH" redefined 277 | #define REGISTER_NO_CPU_DISPATCH(name, fn_type) \ | /usr/lib64/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h:265: note: this is the location of the previous definition 265 | #define REGISTER_NO_CPU_DISPATCH(name) \ | [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/optimizer/SgdFusedStep.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/optimizer/SplitSgdStep.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/utils/csr2csc.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/aten/utils/isa_help.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/runtime/CPUPool.cpp.o [ 98%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/toolkit/sklearn.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/runtime/TaskExecutor.cpp.o [ 99%] Building CXX object 
csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/bert/fused_bert.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/common_loops.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/init.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/jit_compile.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/optim.cpp.o [ 99%] Building CXX object csrc/cpu/CMakeFiles/intel-ext-pt-cpu.dir/tpp/par_loop_generator.cpp.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::parse_jit_info(char*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:413:32: warning: zero-length gnu_printf format string [-Wformat-zero-length] 413 | sprintf(token_start, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:415:30: warning: zero-length gnu_printf format string [-Wformat-zero-length] 415 | sprintf(token_end, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:417:31: warning: zero-length gnu_printf format string [-Wformat-zero-length] 417 | sprintf(token_step, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:423:32: warning: zero-length gnu_printf format string [-Wformat-zero-length] 423 | sprintf(token_start, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:425:30: warning: zero-length gnu_printf format string [-Wformat-zero-length] 425 | sprintf(token_end, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:427:31: warning: zero-length gnu_printf format string [-Wformat-zero-length] 427 | sprintf(token_step, ""); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:691:35: warning: format ‘%d’ expects argument of type ‘int’, but argument 3 has type ‘long int’ [-Wformat=] 691 | sprintf(start_var_name, "%d", loop_params_map[loop_abs_index].start); | ~^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | int long int | %ld /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:703:33: warning: format ‘%d’ expects argument of type ‘int’, but argument 3 has type ‘long int’ [-Wformat=] 703 | sprintf(end_var_name, "%d", loop_params_map[loop_abs_index].end); | ~^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | int long int | %ld /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:711:22: warning: format ‘%d’ expects argument of type ‘int’, but argument 5 has type ‘long int’ [-Wformat=] 711 | "%c%d + %d", | ~^ | | | int | %ld ...... 
714 | loop_params_map[loop_abs_index].block_size[occurence_id - 1]); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | long int /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:730:36: warning: format ‘%d’ expects argument of type ‘int’, but argument 3 has type ‘long int’ [-Wformat=] 730 | sprintf(step_var_name, "%d", loop_params_map[loop_abs_index].step); | ~^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | int long int | %ld /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:739:17: warning: format ‘%d’ expects argument of type ‘int’, but argument 3 has type ‘long int’ [-Wformat=] 739 | "%d", | ~^ | | | int | %ld 740 | loop_params_map[loop_abs_index].block_size[occurence_id]); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | long int /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:752:34: warning: format ‘%d’ expects argument of type ‘int’, but argument 3 has type ‘long int’ [-Wformat=] 752 | sprintf(step_var_name, "%d", loop_params_map[loop_abs_index].step); | ~^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | int long int | %ld /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:262:19: warning: ‘%s’ directive writing up to 511 bytes into a region of size 503 [-Wformat-overflow=] 262 | "for (int %s = %s; %s < %s; %s += %s) {\n", | ^~ 263 | str_idx, | ~~~~~~~ In file included from /usr/include/stdio.h:906, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:3: In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:260:12: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 28 and 3094 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:202:27: warning: ‘%s’ directive writing up to 511 bytes into a region of size between 482 and 497 [-Wformat-overflow=] 202 | "int %s_tasks = ((%s) - (%s) + ((%s) - 1))/(%s);\n", | ^~ 203 | prefix, 204 | str_end, | ~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:200:12: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 39 and 2098 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ 
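The cluster of -Wformat, -Wformat-zero-length, and -Wformat-overflow warnings above all originate in par_loop_generator.cpp: `long int` loop parameters are printed with `"%d"`, empty strings are written via `sprintf(buf, "")`, and the results land in fixed 512-byte buffers whose bounds GCC cannot prove. A small hedged illustration of those three diagnostics and their usual fixes follows; the variable names are borrowed from the warnings only for readability, the code itself is not the project's.

```cpp
#include <cstdio>

int main() {
  long step = 42;           // the loop parameters in the log are long int
  char step_var_name[512];  // fixed-size destinations, as in the warnings
  char token_start[512];

  // -Wformat:          "%d" with a long argument -> use "%ld".
  // -Wformat-overflow:  unbounded sprintf        -> prefer snprintf with a size.
  std::snprintf(step_var_name, sizeof step_var_name, "%ld", step);

  // -Wformat-zero-length: sprintf(token_start, "") only clears the buffer;
  // writing the terminator directly says the same thing without a warning.
  token_start[0] = '\0';

  std::printf("%s|%s\n", step_var_name, token_start);
  return 0;
}
```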
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:221:70: warning: ‘%s’ directive writing up to 511 bytes into a region of size between 400 and 460 [-Wformat-overflow=] 221 | "int my_%s_start = (%s_id * %s_tasks_chunksize < %s_tasks) ? %s + (%s_id * %s_tasks_chunksize) * %s : %s;\n", | ^~ ...... 226 | str_start, | ~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:219:12: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 88 and 1711 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:235:72: warning: ‘%s’ directive writing up to 511 bytes into a region of size between 398 and 458 [-Wformat-overflow=] 235 | "int my_%s_end = ((%s_id+1) * %s_tasks_chunksize < %s_tasks) ? %s + ((%s_id+1) * %s_tasks_chunksize) * %s : %s;\n", | ^~ ...... 240 | str_start, | ~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:233:12: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 94 and 1717 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:249:19: warning: ‘%s’ directive writing up to 511 bytes into a region of size 503 [-Wformat-overflow=] 249 | "for (int %s = my_%s_start; %s < my_%s_end; %s += %s) {\n", | ^~ 250 | str_idx, | ~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘void torch_ipex::tpp::emit_loop_header(loop_code*, loop_param_t*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:247:12: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 44 and 2118 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string 
torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:718:21: warning: ‘%s’ directive writing up to 511 bytes into a region of size between 504 and 507 [-Wformat-overflow=] 718 | "%c%d + %s[%d].block_size[%d]", | ^~ ...... 721 | spec_array_name, | ~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:718:13: note: directive argument in the range [-2147483648, 2147483550] 718 | "%c%d + %s[%d].block_size[%d]", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:718:13: note: directive argument in the range [-129, 126] In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:716:16: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 23 and 550 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:694:33: warning: ‘%d’ directive writing between 1 and 11 bytes into a region of size between 0 and 511 [-Wformat-overflow=] 694 | start_var_name, "%s[%d].start", spec_array_name, loop_abs_index); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:694:29: note: directive argument in the range [-2147483648, 2147483550] 694 | start_var_name, "%s[%d].start", spec_array_name, loop_abs_index); | ^~~~~~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:693:16: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 10 and 531 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:705:35: warning: ‘%d’ directive writing between 1 and 11 bytes into a region of size between 0 and 511 [-Wformat-overflow=] 705 | sprintf(end_var_name, "%s[%d].end", spec_array_name, loop_abs_index); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:705:31: note: directive argument in the range [-2147483648, 2147483550] 705 | sprintf(end_var_name, "%s[%d].end", spec_array_name, loop_abs_index); | ^~~~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:705:16: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 8 and 529 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:754:36: warning: ‘%d’ directive writing between 1 and 11 bytes into a region of size between 0 and 511 [-Wformat-overflow=] 754 | sprintf(step_var_name, "%s[%d].step", spec_array_name, loop_abs_index); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:754:32: note: directive argument in the range [-2147483648, 2147483550] 754 | sprintf(step_var_name, "%s[%d].step", spec_array_name, loop_abs_index); | ^~~~~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:754:16: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 9 and 530 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:744:19: warning: ‘%d’ directive writing between 1 and 11 bytes into a region of size between 0 and 511 [-Wformat-overflow=] 744 | "%s[%d].block_size[%d]", | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:744:15: note: directive argument in the range [-2147483648, 2147483550] 744 | "%s[%d].block_size[%d]", | ^~~~~~~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:744:15: note: directive argument in the range [-128, 127] In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:742:18: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 18 and 542 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp: In function ‘std::string torch_ipex::tpp::loop_generator(const char*)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:733:34: warning: ‘%d’ directive writing between 1 and 11 bytes into a 
region of size between 0 and 511 [-Wformat-overflow=] 733 | step_var_name, "%s[%d].step", spec_array_name, loop_abs_index); | ^~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:733:30: note: directive argument in the range [-2147483648, 2147483550] 733 | step_var_name, "%s[%d].step", spec_array_name, loop_abs_index); | ^~~~~~~~~~~~~ In function ‘int sprintf(char*, const char*, ...)’, inlined from ‘std::string torch_ipex::tpp::loop_generator(const char*)’ at /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp:732:18: /usr/include/bits/stdio2.h:30:34: note: ‘__builtin___sprintf_chk’ output between 9 and 530 bytes into a destination of size 512 30 | return __builtin___sprintf_chk (__s, __USE_FORTIFY_LEVEL - 1, | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | __glibc_objsize (__s), __fmt, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 32 | __va_arg_pack ()); | ~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/init.cpp:15:17: note: ‘#pragma message: Using OpenMP’ 15 | #pragma message "Using OpenMP" | ^~~~~~~~~~~~~~ In file included from /usr/lib64/python3.10/site-packages/torch/include/c10/core/DeviceType.h:8, from /usr/lib64/python3.10/site-packages/torch/include/c10/core/Device.h:3, from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/TensorBody.h:11, from /usr/lib64/python3.10/site-packages/torch/include/ATen/core/Tensor.h:3, from /usr/lib64/python3.10/site-packages/torch/include/ATen/Tensor.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/RNN.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/RNN.cpp:1: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/RNN.cpp: In function ‘std::vector > torch_ipex::cpu::get_mkldnn_weight_scales_of_lstm(const at::Tensor&, const at::Tensor&)’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/RNN.cpp:225:39: warning: suggest parentheses around comparison in operand of ‘==’ [-Wparentheses] 225 | weight_ih_scales_tensor.sizes() == weight_hh_scales_tensor.sizes() == 1, | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/c10/macros/Macros.h:200:64: note: in definition of macro ‘C10_UNLIKELY’ 200 | #define C10_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) | ^~~~ /usr/lib64/python3.10/site-packages/torch/include/c10/util/Exception.h:506:7: note: in expansion of macro ‘C10_UNLIKELY_OR_CONST’ 506 | if (C10_UNLIKELY_OR_CONST(!(cond))) { \ | ^~~~~~~~~~~~~~~~~~~~~ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/aten/RNN.cpp:224:3: note: in expansion of macro ‘TORCH_CHECK’ 224 | TORCH_CHECK( | ^~~~~~~~~~~ [100%] Linking CXX shared library libintel-ext-pt-cpu.so [100%] Built target intel-ext-pt-cpu Install the project... 
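The -Wparentheses warning from RNN.cpp a few lines above flags a chained comparison inside TORCH_CHECK: an expression of the form `a == b == 1` parses as `(a == b) == 1`, so the second comparison tests the boolean result of the first rather than the values themselves. A tiny standalone illustration of that parse is below; the actual tensor-size check remains in the project's code and is not reproduced here.

```cpp
#include <iostream>

int main() {
  long a = 4, b = 4;
  // Chained form: (a == b) is evaluated first, yielding bool true (1),
  // and that bool is then compared against 1 -- what the warning points at.
  bool chained = a == b == 1;            // means (a == b) == 1
  // What such a check usually intends: both conditions spelled out.
  bool intended = (a == b) && (a == 1);
  std::cout << chained << " " << intended << "\n";  // prints "1 0" here
  return 0;
}
```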
-- Install configuration: "Release" -- Set runtime path of "/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/lib/libintel-ext-pt-cpu.so" to "$ORIGIN" [ 14%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest.dir/src/gtest-all.cc.o [ 28%] Linking CXX static library ../../../lib/libgtest.a [ 28%] Built target gtest [ 42%] Building CXX object third_party/googletest/googletest/CMakeFiles/gtest_main.dir/src/gtest_main.cc.o [ 57%] Linking CXX static library ../../../lib/libgtest_main.a [ 57%] Built target gtest_main [ 71%] Building CXX object CMakeFiles/ipex_cpp_test.dir/test_runtime_api.cpp.o [ 85%] Building CXX object CMakeFiles/ipex_cpp_test.dir/test_dyndisp_and_isa_api.cpp.o [100%] Linking CXX executable ipex_cpp_test [100%] Built target ipex_cpp_test Install the project... -- Install configuration: "Release" -- Set runtime path of "/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/bin/ipex_cpp_test" to "$ORIGIN" [ 50%] Building CXX object intel_extension_for_pytorch/csrc/cpu/CMakeFiles/PY_CPU_OBJ.dir/Module.cpp.o [ 50%] Building CXX object intel_extension_for_pytorch/csrc/cpu/CMakeFiles/PY_CPU_OBJ.dir/TaskModule.cpp.o In file included from /usr/lib64/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h:9, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.h:3, from /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:1: /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘class pybind11::class_’: /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:2134:7: required from ‘class pybind11::enum_’ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:145:44: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1479:7: warning: ‘pybind11::class_’ declared with greater visibility than its base ‘pybind11::detail::generic_type’ [-Wattributes] 1479 | class class_ : public detail::generic_type { | ^~~~~~ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘class pybind11::class_’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:152:66: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1479:7: warning: ‘pybind11::class_’ declared with greater visibility than its base ‘pybind11::detail::generic_type’ [-Wattributes] /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘struct pybind11::class_ >::is_valid_class_option >’: /usr/lib64/python3.10/site-packages/torch/include/pybind11/detail/../detail/common.h:658:39: required by substitution of ‘template using all_of = std::is_same, pybind11::detail::bools > [with Ts = {pybind11::class_ >::is_valid_class_option >}]’ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1496:70: required from ‘class pybind11::class_ >’ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:160:66: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1488:12: warning: ‘pybind11::class_ >::is_valid_class_option >’ declared with greater visibility than its base 
‘pybind11::detail::negation, pybind11::detail::bools > >’ [-Wattributes] 1488 | struct is_valid_class_option : detail::any_of, is_subtype, is_base> {}; | ^~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘class pybind11::class_ >’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:160:66: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1479:7: warning: ‘pybind11::class_ >’ declared with greater visibility than its base ‘pybind11::detail::generic_type’ [-Wattributes] 1479 | class class_ : public detail::generic_type { | ^~~~~~ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘struct pybind11::class_ >::is_valid_class_option >’: /usr/lib64/python3.10/site-packages/torch/include/pybind11/detail/../detail/common.h:658:39: required by substitution of ‘template using all_of = std::is_same, pybind11::detail::bools > [with Ts = {pybind11::class_ >::is_valid_class_option >}]’ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1496:70: required from ‘class pybind11::class_ >’ /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:171:72: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1488:12: warning: ‘pybind11::class_ >::is_valid_class_option >’ declared with greater visibility than its base ‘pybind11::detail::negation, pybind11::detail::bools > >’ [-Wattributes] 1488 | struct is_valid_class_option : detail::any_of, is_subtype, is_base> {}; | ^~~~~~~~~~~~~~~~~~~~~ /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h: In instantiation of ‘class pybind11::class_ >’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/Module.cpp:171:72: required from here /usr/lib64/python3.10/site-packages/torch/include/pybind11/pybind11.h:1479:7: warning: ‘pybind11::class_ >’ declared with greater visibility than its base ‘pybind11::detail::generic_type’ [-Wattributes] 1479 | class class_ : public detail::generic_type { | ^~~~~~ [ 50%] Built target PY_CPU_OBJ [ 75%] Building CXX object intel_extension_for_pytorch/csrc/CMakeFiles/intel-ext-pt-python.dir/init_python_bindings.cpp.o [100%] Linking CXX shared library libintel-ext-pt-python.so [100%] Built target intel-ext-pt-python Install the project... -- Install configuration: "Release" -- Set runtime path of "/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/lib/libintel-ext-pt-python.so" to "$ORIGIN" building 'intel_extension_for_pytorch._C' extension Emitting ninja build file /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/build.ninja... Compiling objects... Allowing ninja to set a default number of workers... 
(overridable by setting the environment variable MAX_JOBS=N) [1/1] g++ -MMD -MF /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_C.o.d -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include/TH -I/usr/lib64/python3.10/site-packages/torch/include/THC -I/usr/include/python3.10 -c -c /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/_C.cpp -o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_C.o -Wall -Wextra -Wno-strict-overflow -Wno-unused-parameter -Wno-missing-field-initializers -Wno-write-strings -Wno-unknown-pragmas -Wno-deprecated-declarations -fno-strict-aliasing -Wno-missing-braces -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1017"' -DTORCH_EXTENSION_NAME=_C -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++17 g++ -shared -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_C.o -Llib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64 -Lbuild/Release/packages/intel_extension_for_pytorch/lib -lintel-ext-pt-python -lc10 -ltorch -ltorch_cpu -ltorch_python -lpython3.10 -o build/Release/packages/intel_extension_for_pytorch/_C.cpython-310-x86_64-linux-gnu.so -Wl,-rpath,$ORIGIN/lib building 'intel_extension_for_pytorch._isa_help' extension creating 
/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa creating /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu creating /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu/isa_help Emitting ninja build file /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/build.ninja... Compiling objects... Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) [1/3] g++ -MMD -MF /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu/isa_help/isa_help.o.d -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include/TH -I/usr/lib64/python3.10/site-packages/torch/include/THC -I/usr/include/python3.10 -c -c /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/cpu/isa_help/isa_help.cpp -o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu/isa_help/isa_help.o -Wall -Wextra -Wno-strict-overflow -Wno-unused-parameter -Wno-missing-field-initializers -Wno-write-strings -Wno-unknown-pragmas -Wno-deprecated-declarations -fno-strict-aliasing -Wno-missing-braces -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1017"' -DTORCH_EXTENSION_NAME=_isa_help -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++17 [2/3] g++ -MMD -MF /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa/cpu_feature.o.d -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic 
-fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include/TH -I/usr/lib64/python3.10/site-packages/torch/include/THC -I/usr/include/python3.10 -c -c /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa/cpu_feature.cpp -o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa/cpu_feature.o -Wall -Wextra -Wno-strict-overflow -Wno-unused-parameter -Wno-missing-field-initializers -Wno-write-strings -Wno-unknown-pragmas -Wno-deprecated-declarations -fno-strict-aliasing -Wno-missing-braces -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1017"' -DTORCH_EXTENSION_NAME=_isa_help -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++17 /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa/cpu_feature.cpp: In member function ‘bool torch_ipex::cpu::CPUFeature::os_avx2()’: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa/cpu_feature.cpp:177:14: warning: unused variable ‘max_sub_leaf’ [-Wunused-variable] 177 | uint32_t max_sub_leaf = 0; | ^~~~~~~~~~~~ [3/3] g++ -MMD -MF /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_isa_help_main.o.d -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection 
-D_GNU_SOURCE -fPIC -fwrapv -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/isa -I/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include -I/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include -I/usr/lib64/python3.10/site-packages/torch/include/TH -I/usr/lib64/python3.10/site-packages/torch/include/THC -I/usr/include/python3.10 -c -c /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/intel_extension_for_pytorch/csrc/_isa_help_main.cpp -o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_isa_help_main.o -Wall -Wextra -Wno-strict-overflow -Wno-unused-parameter -Wno-missing-field-initializers -Wno-write-strings -Wno-unknown-pragmas -Wno-deprecated-declarations -fno-strict-aliasing -Wno-missing-braces -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1017"' -DTORCH_EXTENSION_NAME=_isa_help -D_GLIBCXX_USE_CXX11_ABI=1 -std=c++17 g++ -shared -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa/cpu_feature.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_isa_help_main.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu/isa_help/isa_help.o -Llib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64 -Lbuild/Release/packages/intel_extension_for_pytorch/lib -lc10 -ltorch -ltorch_cpu -ltorch_python -lpython3.10 -o build/Release/packages/intel_extension_for_pytorch/_isa_help.cpython-310-x86_64-linux-gnu.so -Wl,-rpath,$ORIGIN/lib + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.EtcxoJ + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64 '!=' / ']' + rm -rf /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64 ++ dirname /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64 + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security 
-Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' + export FCFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib64: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd intel-extension-for-pytorch-2.0.100+cpu + python3 setup.py install --prefix=/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' Building Intel Extension for PyTorch. Version: 2.0.100+git25b7212 running install running build running build_py running build_ext running build_clib -- Caffe2: CUDA detected: 12.1 -- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc -- Caffe2: CUDA toolkit directory: /usr/local/cuda -- Caffe2: Header version is: 12.1 -- USE_CUDNN is set to 0. Compiling without cuDNN support -- Automatic GPU detection failed. Building for common architectures. -- Autodetected CUDA architecture(s): 3.5;5.0;8.0;8.6;8.9;9.0 -- Added CUDA NVCC flags for: -gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_89,code=sm_89;-gencode;arch=compute_90,code=sm_90 CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message): static library kineto_LIBRARY-NOTFOUND not found. Call Stack (most recent call first): /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found) CMakeLists.txt:20 (find_package) Release build. -- cmake version: 3.26.3 CMake Deprecation Warning at third_party/ideep/mkl-dnn/CMakeLists.txt:36 (cmake_policy): The OLD behavior for policy CMP0025 will be removed from a future version of CMake. The cmake-policies(7) manual explains that the OLD behaviors of all policies are deprecated and that a policy should be set to OLD only under specific short-term circumstances. Projects should be ported to the NEW behavior and not rely on setting a policy to OLD. -- DNNL_TARGET_ARCH: X64 -- DNNL_LIBRARY_NAME: dnnl -- Could NOT find Doxyrest (missing: DOXYREST_EXECUTABLE) -- Could NOT find Sphinx (missing: SPHINX_EXECUTABLE) -- Enabled workload: TRAINING -- Enabled primitives: ALL -- Enabled primitive CPU ISA: ALL -- Enabled primitive GPU ISA: ALL -- Primitive cache is enabled -- Could NOT find Doxygen (missing: DOXYGEN_EXECUTABLE) -- Cannot find Doxygen package -- DNNL_GRAPH_BUILD_FOR_CI is set to be OFF -- Compiling oneDNN Graph with CPU runtime OMP support -- Compiling oneDNN Graph with GPU runtime NONE support -- Graph compiler backend is disabled. 
-- Set version definitions to /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/src/utils/verbose.cpp
-- Compiled partition cache is enabled
-- IPEX_CPU_CPP_TPP_SRCS: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/bert/fused_bert.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/common_loops.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/init.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/jit_compile.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/optim.cpp;/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/csrc/cpu/tpp/par_loop_generator.cpp
CMake Warning at cmake/ClangFormat.cmake:19 (message):
Please install clang-format-12 before contributing to IPEX!
Call Stack (most recent call first):
csrc/cpu/CMakeLists.txt:103 (include)
Intel oneMKL found.
-- Using ATen parallel backend: OMP
--
-- ******** General Summary ********
-- General:
-- CMake version : 3.26.3
-- CMake command : /usr/bin/cmake
-- System : Linux
-- Platform : Linux-4.18.0-305.19.1.el8_4.x86_64-x86_64-with-glibc2.36
-- Target name : intel_extension_for_pytorch
-- Target version : 2.0.100
-- Install path : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch
-- Build type : Release
-- Options:
-- BUILD_WITH_CPU : ON
-- BUILD_WITH_XPU : OFF
-- BUILD_NO_CLANGFORMAT : OFF
-- BUILD_STATS : OFF
-- BUILD_STRIPPED_BIN : OFF
--
-- ******** Summary on CPU ********
-- General:
-- C compiler : /usr/bin/gcc
-- C++ compiler : /usr/bin/g++
-- C++ compiler ID : GNU
-- C++ compiler version : 12.2.1
-- CXX flags : -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -fPIC -Wno-narrowing -Wall -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-ignored-qualifiers -Wno-stringop-overflow -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fopenmp -faligned-new -Wno-unused-but-set-variable -Wno-uninitialized -fno-math-errno -fno-trapping-math -DNDEBUG -D_GLIBCXX_USE_CXX11_ABI=1
-- Compile definitions : AT_PARALLEL_OPENMP=1
-- CXX Linker options : -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -Wl,-Bsymbolic-functions -Wl,--disable-new-dtags
-- Link libraries : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/libxsmm/lib/libxsmm.a;dnnl_graph;-Wl,--start-group;/builddir/build/BUILD/mkl/lib/libmkl_intel_lp64.a;/builddir/build/BUILD/mkl/lib/libmkl_core.a;/builddir/build/BUILD/mkl/lib/libmkl_gnu_thread.a;-Wl,--end-group;/usr/lib64/python3.10/site-packages/torch/lib/libtorch_cpu.so;/usr/lib64/python3.10/site-packages/torch/lib/libc10.so
-- Torch version : 2.0.1
-- Torch include : /usr/lib64/python3.10/site-packages/torch/include;/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include
-- oneDNN include : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/third_party/ideep/mkl-dnn/include
-- oneMKL include : /builddir/build/BUILD/mkl/include
-- Options:
-- BUILD_STATIC_ONEMKL : ON
-- IPEX_DISP_OP : OFF
--
-- ********************************
-- Configuring done (6.4s)
-- Generating done (0.1s)
-- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu
-- Caffe2: CUDA detected: 12.1
-- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc
-- Caffe2: CUDA toolkit directory: /usr/local/cuda
-- Caffe2: Header version is: 12.1
-- USE_CUDNN is set to 0. Compiling without cuDNN support
-- Automatic GPU detection failed. Building for common architectures.
-- Autodetected CUDA architecture(s): 3.5;5.0;8.0;8.6;8.9;9.0
-- Added CUDA NVCC flags for: -gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_89,code=sm_89;-gencode;arch=compute_90,code=sm_90
CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message):
static library kineto_LIBRARY-NOTFOUND not found.
Call Stack (most recent call first):
/usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found)
CMakeLists.txt:25 (find_package)
Intel oneMKL found.
-- Configuring done (7.2s)
-- Generating done (0.0s)
-- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/tests/cpu/cpp
-- Caffe2: CUDA detected: 12.1
-- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc
-- Caffe2: CUDA toolkit directory: /usr/local/cuda
-- Caffe2: Header version is: 12.1
-- USE_CUDNN is set to 0. Compiling without cuDNN support
-- Automatic GPU detection failed. Building for common architectures.
-- Autodetected CUDA architecture(s): 3.5;5.0;8.0;8.6;8.9;9.0
-- Added CUDA NVCC flags for: -gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_89,code=sm_89;-gencode;arch=compute_90,code=sm_90
CMake Warning at /usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:22 (message):
static library kineto_LIBRARY-NOTFOUND not found.
Call Stack (most recent call first):
/usr/lib64/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:127 (append_torchlib_if_found)
CMakeLists.txt:20 (find_package)
-- pybind11 found
Release build.
CMake Warning at cmake/ClangFormat.cmake:19 (message):
Please install clang-format-12 before contributing to IPEX!
Call Stack (most recent call first):
intel_extension_for_pytorch/csrc/CMakeLists.txt:33 (include)
--
-- ******** General Summary ********
-- General:
-- CMake version : 3.26.3
-- CMake command : /usr/bin/cmake
-- System : Linux
-- Platform : Linux-4.18.0-305.19.1.el8_4.x86_64-x86_64-with-glibc2.36
-- Target name : intel_extension_for_pytorch
-- Target version : 2.0.100
-- Install path : /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch
-- Build type : Release
-- Options:
-- BUILD_WITH_CPU : ON
-- BUILD_WITH_XPU : OFF
-- BUILD_NO_CLANGFORMAT : OFF
-- BUILD_STATS : OFF
-- BUILD_STRIPPED_BIN : OFF
--
-- ******** Summary on Python ********
-- General:
-- C compiler : /usr/bin/gcc
-- C++ compiler : /usr/bin/g++
-- C++ compiler ID : GNU
-- C++ compiler version : 12.2.1
-- CXX flags : -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -D_GLIBCXX_USE_CXX11_ABI=1 -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1017\"
-- CXX Linker options : -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -Wl,--disable-new-dtags
-- Link libraries : intel-ext-pt-cpu
-- Torch version : 2.0.1
-- Torch include : /usr/lib64/python3.10/site-packages/torch/include;/usr/lib64/python3.10/site-packages/torch/include/torch/csrc/api/include
-- Python include : /usr/include/python3.10
-- pybind11 include : /usr/lib64/python3.10/site-packages/torch/include
--
-- ***********************************
-- Configuring done (6.7s)
-- Generating done (0.0s)
-- Build files have been written to: /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc
[ 1%] Built target libxsmm
[ 1%] Built target dnnl_graph_backend_fake
[ 2%] Built target dnnl_graph_utils
[ 4%] Built target dnnl_graph_common
[ 8%] Built target dnnl_graph_backend_dnnl
[ 15%] Built target dnnl_common
[ 27%] Built target dnnl_cpu
[ 56%] Built target dnnl_cpu_x64
[ 56%] Built target dnnl
[ 56%] Built target dnnl_graph
[100%] Built target intel-ext-pt-cpu
Install the project...
-- Install configuration: "Release"
[ 28%] Built target gtest
[ 57%] Built target gtest_main
[ 71%] Linking CXX executable ipex_cpp_test
[100%] Built target ipex_cpp_test
Install the project...
-- Install configuration: "Release"
-- Set runtime path of "/builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/packages/intel_extension_for_pytorch/bin/ipex_cpp_test" to "$ORIGIN"
[ 50%] Built target PY_CPU_OBJ
[100%] Built target intel-ext-pt-python
Install the project...
-- Install configuration: "Release"
building 'intel_extension_for_pytorch._C' extension
Emitting ninja build file /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/build.ninja...
Compiling objects...
Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
ninja: no work to do.
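Note: the "Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)" messages above describe how the parallelism of these ninja-driven extension builds is controlled. A minimal sketch of capping it, assuming the same setup.py invocation this build uses (the value 4 is illustrative, not taken from this log):

    # hypothetical invocation: limit the extension build to 4 parallel compile jobs
    MAX_JOBS=4 python3 setup.py install --prefix=/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr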
g++ -shared -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_C.o -Llib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64 -Lbuild/Release/packages/intel_extension_for_pytorch/lib -lintel-ext-pt-python -lc10 -ltorch -ltorch_cpu -ltorch_python -lpython3.10 -o build/Release/packages/intel_extension_for_pytorch/_C.cpython-310-x86_64-linux-gnu.so -Wl,-rpath,$ORIGIN/lib building 'intel_extension_for_pytorch._isa_help' extension Emitting ninja build file /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/build.ninja... Compiling objects... Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) ninja: no work to do. g++ -shared -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,-z,now -Wl,--build-id=sha1 -g -Wl,-z,relro -Wl,--as-needed -Wl,--build-id=sha1 -O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -m64 -march=x86-64-v2 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/csrc/cpu/isa/cpu_feature.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/_isa_help_main.o /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu/build/Release/intel_extension_for_pytorch/csrc/cpu/isa_help/isa_help.o -Llib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64/python3.10/site-packages/torch/lib -L/usr/lib64 -Lbuild/Release/packages/intel_extension_for_pytorch/lib -lc10 -ltorch -ltorch_cpu -ltorch_python -lpython3.10 -o build/Release/packages/intel_extension_for_pytorch/_isa_help.cpython-310-x86_64-linux-gnu.so -Wl,-rpath,$ORIGIN/lib running install_lib creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64 creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10 creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast copying build/Release/packages/intel_extension_for_pytorch/cpu/autocast/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast copying 
build/Release/packages/intel_extension_for_pytorch/cpu/autocast/_autocast_mode.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast copying build/Release/packages/intel_extension_for_pytorch/cpu/autocast/_grad_scaler.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf/config.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/conf/dotdict.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/example copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/example/resnet50.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/example creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy/grid.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy/random.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/strategy/strategy.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/__main__.py -> 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune copying build/Release/packages/intel_extension_for_pytorch/cpu/hypertune/objective.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/__main__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/cpu_info.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/launcher_base.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/launcher_distributed.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch copying build/Release/packages/intel_extension_for_pytorch/cpu/launch/launcher_multi_instances.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/runtime/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/runtime/cpupool.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/runtime/multi_stream.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/runtime/runtime_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/runtime/task.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime copying build/Release/packages/intel_extension_for_pytorch/cpu/__init__.py -> 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu copying build/Release/packages/intel_extension_for_pytorch/cpu/_auto_kernel_selection.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu copying build/Release/packages/intel_extension_for_pytorch/cpu/_cpu_isa.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu copying build/Release/packages/intel_extension_for_pytorch/cpu/auto_ipex.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/jit copying build/Release/packages/intel_extension_for_pytorch/jit/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/jit copying build/Release/packages/intel_extension_for_pytorch/jit/_trace.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/jit creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional copying build/Release/packages/intel_extension_for_pytorch/nn/functional/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional copying build/Release/packages/intel_extension_for_pytorch/nn/functional/_embeddingbag.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional copying build/Release/packages/intel_extension_for_pytorch/nn/functional/_roi_align.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional copying build/Release/packages/intel_extension_for_pytorch/nn/functional/_tensor_method.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional copying build/Release/packages/intel_extension_for_pytorch/nn/functional/interaction.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules copying build/Release/packages/intel_extension_for_pytorch/nn/modules/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules copying build/Release/packages/intel_extension_for_pytorch/nn/modules/_roi_align.py -> 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules copying build/Release/packages/intel_extension_for_pytorch/nn/modules/frozen_batch_norm.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules copying build/Release/packages/intel_extension_for_pytorch/nn/modules/linear_fuse_eltwise.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules copying build/Release/packages/intel_extension_for_pytorch/nn/modules/merged_embeddingbag.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils copying build/Release/packages/intel_extension_for_pytorch/nn/utils/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils copying build/Release/packages/intel_extension_for_pytorch/nn/utils/_model_convert.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils copying build/Release/packages/intel_extension_for_pytorch/nn/utils/_weight_cast.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils copying build/Release/packages/intel_extension_for_pytorch/nn/utils/_weight_prepack.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils copying build/Release/packages/intel_extension_for_pytorch/nn/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim copying build/Release/packages/intel_extension_for_pytorch/optim/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim copying build/Release/packages/intel_extension_for_pytorch/optim/_functional.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim copying build/Release/packages/intel_extension_for_pytorch/optim/_lamb.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim copying build/Release/packages/intel_extension_for_pytorch/optim/_optimizer_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/__init__.py -> 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_autotune.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_module_swap_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_qconfig.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_quantization_state.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_quantization_state_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_quantize.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_quantize_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_recipe.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization copying build/Release/packages/intel_extension_for_pytorch/quantization/_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils copying build/Release/packages/intel_extension_for_pytorch/utils/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils copying build/Release/packages/intel_extension_for_pytorch/utils/_custom_fx_tracer.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils copying build/Release/packages/intel_extension_for_pytorch/utils/channels_last_1d.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils copying build/Release/packages/intel_extension_for_pytorch/utils/linear_bn_folding.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils copying build/Release/packages/intel_extension_for_pytorch/utils/verbose.py -> 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/amp copying build/Release/packages/intel_extension_for_pytorch/xpu/amp/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/amp copying build/Release/packages/intel_extension_for_pytorch/xpu/amp/autocast_mode.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/amp creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/modules copying build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic/modules/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/modules copying build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic/modules/intrinsic.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/modules copying build/Release/packages/intel_extension_for_pytorch/xpu/intrinsic/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic copying build/Release/packages/intel_extension_for_pytorch/xpu/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/_utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/cpp_extension.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/lazy_init.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/memory.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/random.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/single_card.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu 
copying build/Release/packages/intel_extension_for_pytorch/xpu/streams.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu copying build/Release/packages/intel_extension_for_pytorch/xpu/utils.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/utils copying build/Release/packages/intel_extension_for_pytorch/tpp/utils/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/utils copying build/Release/packages/intel_extension_for_pytorch/tpp/utils/blocked_layout.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/utils copying build/Release/packages/intel_extension_for_pytorch/tpp/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp copying build/Release/packages/intel_extension_for_pytorch/tpp/fused_bert.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp copying build/Release/packages/intel_extension_for_pytorch/tpp/optim.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp copying build/Release/packages/intel_extension_for_pytorch/__init__.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch copying build/Release/packages/intel_extension_for_pytorch/frontend.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch copying build/Release/packages/intel_extension_for_pytorch/_version.py -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib copying build/Release/packages/intel_extension_for_pytorch/lib/libintel-ext-pt-cpu.so -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib copying build/Release/packages/intel_extension_for_pytorch/lib/libintel-ext-pt-python.so -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/share creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/share/cmake creating 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/share/cmake/IPEX copying build/Release/packages/intel_extension_for_pytorch/share/cmake/IPEX/IPEXConfig.cmake -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/share/cmake/IPEX copying build/Release/packages/intel_extension_for_pytorch/share/cmake/IPEX/IPEXVersionConfig.cmake -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/share/cmake/IPEX copying build/Release/packages/intel_extension_for_pytorch/LICENSE -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch creating /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/bin copying build/Release/packages/intel_extension_for_pytorch/bin/ipex_cpp_test -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/bin copying build/Release/packages/intel_extension_for_pytorch/_C.cpython-310-x86_64-linux-gnu.so -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch copying build/Release/packages/intel_extension_for_pytorch/_isa_help.cpython-310-x86_64-linux-gnu.so -> /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast/_autocast_mode.py to _autocast_mode.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/autocast/_grad_scaler.py to _grad_scaler.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf/config.py to config.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/conf/dotdict.py to dotdict.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/example/resnet50.py to resnet50.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy/__init__.py to __init__.cpython-310.pyc byte-compiling 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy/grid.py to grid.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy/random.py to random.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/strategy/strategy.py to strategy.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/__main__.py to __main__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/hypertune/objective.py to objective.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/__main__.py to __main__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/cpu_info.py to cpu_info.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/launcher_base.py to launcher_base.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/launcher_distributed.py to launcher_distributed.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/launch/launcher_multi_instances.py to launcher_multi_instances.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime/cpupool.py to cpupool.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime/multi_stream.py to multi_stream.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime/runtime_utils.py to runtime_utils.cpython-310.pyc byte-compiling 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/runtime/task.py to task.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/_auto_kernel_selection.py to _auto_kernel_selection.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/_cpu_isa.py to _cpu_isa.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/cpu/auto_ipex.py to auto_ipex.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/jit/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/jit/_trace.py to _trace.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional/_embeddingbag.py to _embeddingbag.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional/_roi_align.py to _roi_align.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional/_tensor_method.py to _tensor_method.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/functional/interaction.py to interaction.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules/_roi_align.py to _roi_align.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules/frozen_batch_norm.py to frozen_batch_norm.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules/linear_fuse_eltwise.py to linear_fuse_eltwise.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/modules/merged_embeddingbag.py to 
merged_embeddingbag.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils/_model_convert.py to _model_convert.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils/_weight_cast.py to _weight_cast.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/utils/_weight_prepack.py to _weight_prepack.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/nn/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim/_functional.py to _functional.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim/_lamb.py to _lamb.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/optim/_optimizer_utils.py to _optimizer_utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_autotune.py to _autotune.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_module_swap_utils.py to _module_swap_utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_qconfig.py to _qconfig.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_quantization_state.py to _quantization_state.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_quantization_state_utils.py to _quantization_state_utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_quantize.py to _quantize.cpython-310.pyc byte-compiling 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_quantize_utils.py to _quantize_utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_recipe.py to _recipe.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/quantization/_utils.py to _utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils/_custom_fx_tracer.py to _custom_fx_tracer.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils/channels_last_1d.py to channels_last_1d.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils/linear_bn_folding.py to linear_bn_folding.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/utils/verbose.py to verbose.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/amp/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/amp/autocast_mode.py to autocast_mode.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/modules/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/modules/intrinsic.py to intrinsic.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/intrinsic/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/_utils.py to _utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/cpp_extension.py to cpp_extension.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/lazy_init.py to lazy_init.cpython-310.pyc byte-compiling 
/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/memory.py to memory.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/random.py to random.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/single_card.py to single_card.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/streams.py to streams.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/xpu/utils.py to utils.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/utils/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/utils/blocked_layout.py to blocked_layout.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/fused_bert.py to fused_bert.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/tpp/optim.py to optim.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/__init__.py to __init__.cpython-310.pyc byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/frontend.py to frontend.cpython-310.pyc /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/frontend.py:732: SyntaxWarning: assertion is always true, perhaps remove parentheses? 
assert(dtype == torch.float or dtype == torch.bfloat16, "TPP only supports torch.float and torch.bfloat16.") byte-compiling /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/_version.py to _version.cpython-310.pyc running install_egg_info running egg_info creating build/Release/packages/intel_extension_for_pytorch.egg-info writing build/Release/packages/intel_extension_for_pytorch.egg-info/PKG-INFO writing dependency_links to build/Release/packages/intel_extension_for_pytorch.egg-info/dependency_links.txt writing entry points to build/Release/packages/intel_extension_for_pytorch.egg-info/entry_points.txt writing requirements to build/Release/packages/intel_extension_for_pytorch.egg-info/requires.txt writing top-level names to build/Release/packages/intel_extension_for_pytorch.egg-info/top_level.txt writing manifest file 'build/Release/packages/intel_extension_for_pytorch.egg-info/SOURCES.txt' /usr/lib/python3.10/site-packages/setuptools/command/egg_info.py:643: SetuptoolsDeprecationWarning: Custom 'build_py' does not implement 'get_data_files_without_manifest'. Please extend command classes from setuptools instead of distutils. warnings.warn( reading manifest file 'build/Release/packages/intel_extension_for_pytorch.egg-info/SOURCES.txt' adding license file 'LICENSE' writing manifest file 'build/Release/packages/intel_extension_for_pytorch.egg-info/SOURCES.txt' Copying build/Release/packages/intel_extension_for_pytorch.egg-info to /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch-2.0.100+git25b7212-py3.10.egg-info + /usr/bin/find-debuginfo -j80 --strict-build-id -m -i --build-id-seed 2.0.100-1.an23 --unique-debug-suffix -2.0.100-1.an23.x86_64 --unique-debug-src-base intel-extension-for-pytorch-2.0.100-1.an23.x86_64 --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 110000000 -S debugsourcefiles.list /builddir/build/BUILD/intel-extension-for-pytorch-2.0.100+cpu extracting debug info from /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/_isa_help.cpython-310-x86_64-linux-gnu.so extracting debug info from /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/bin/ipex_cpp_test extracting debug info from /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib/libintel-ext-pt-python.so extracting debug info from /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/_C.cpython-310-x86_64-linux-gnu.so extracting debug info from /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib/libintel-ext-pt-cpu.so Support for debuginfod is not compiled into GDB. Support for debuginfod is not compiled into GDB. Support for debuginfod is not compiled into GDB. Support for debuginfod is not compiled into GDB. Support for debuginfod is not compiled into GDB. original debug info size: 1316448kB, size after compression: 971612kB /usr/bin/sepdebugcrcfix: Updated 5 CRC32s, 0 CRC32s did match. 
50264 blocks + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/anolis/brp-ldconfig + COMPRESS='zstd -f --rm -19 -T0' + COMPRESS_EXT=.zst + /usr/lib/rpm/brp-compress + /usr/lib/rpm/anolis/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/brp-remove-la-files + /usr/lib/rpm/anolis/clean_perl + /usr/lib/rpm/anolis/check_elf_files Warning: unused libraries in /usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/_C.cpython-310-x86_64-linux-gnu.so: libtorch_cpu.so libtorch_python.so Warning: undefined symbols in /usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/lib/libintel-ext-pt-python.so: PyInstanceMethod_Type PyExc_ValueError _Py_TrueStruct PyExc_IndexError PyCapsule_Type PyModule_Type _Py_NoneStruct PyExc_MemoryError PyType_Type PyByteArray_Type PyCFunction_Type PyExc_OverflowError PyProperty_Type PyExc_BufferError PyExc_RuntimeError _Py_NotImplementedStruct PyBaseObject_Type PyExc_TypeError PyMethod_Type THPDtypeType _Py_FalseStruct PyDict_Type PyFloat_Type PyExc_SystemError PyObject_GenericGetDict PyObject_GenericSetDict PyTuple_SetItem PyObject_Repr PyLong_AsLong PyLong_FromSsize_t PyByteArray_Size PyObject_Call PyNumber_And _ZN5torch3jit10toPyObjectEN3c106IValueE PyErr_NormalizeException PyInstanceMethod_New PyEval_AcquireThread PyObject_Str PyThreadState_DeleteCurrent PyGILState_GetThisThreadState PyObject_GetAttrString PyMem_Free PyErr_Restore _ZN5torch11getTHPDtypeEN3c1010ScalarTypeE PyType_IsSubtype PyModule_AddObject PyCapsule_SetPointer PyTuple_New PyObject_SetAttr PyObject_IsInstance PyEval_RestoreThread PyException_SetTraceback PyNumber_Float PyUnicode_FromFormat PyList_Append PyDict_Contains PyDict_Next PyList_Size PyTuple_Size PyNumber_Long PyBuffer_Release PyObject_GetIter PyErr_Format PyObject_CallObject PyFloat_FromDouble PyFloat_AsDouble PyUnicode_DecodeUTF8 _ZN8pybind116detail11type_casterIN2at6TensorEvE4castERKS3_NS_19return_value_policyENS_6handleE _Py_Dealloc PyByteArray_AsString _ZN5torch9TypeErrorC1EPKcz PyList_New PyNumber_Check _PyObject_GetDictPtr PyUnicode_FromString PyIndex_Check PyCapsule_SetContext PyFrame_GetLineNumber PyThread_tss_get PyBytes_Size PySequence_Check PyList_GetItem PyException_SetContext PyErr_Clear PyObject_HasAttrString PyWeakref_NewRef PyDict_New PyErr_SetString PyCapsule_GetContext PyThreadState_Get PyObject_SetItem PyCapsule_New PyMem_Calloc PyObject_SetAttrString PyGILState_Release PyCapsule_GetPointer PyNumber_Xor PyThread_tss_alloc PyBytes_AsString PyDict_GetItemWithError PyThread_tss_set PyObject_GetItem PyType_Ready PyEval_SaveThread PySequence_GetItem PyNumber_Invert PyObject_ClearWeakRefs PySequence_Size PyLong_FromLong PyEval_GetBuiltins PyErr_WriteUnraisable PyObject_RichCompareBool PyNumber_Or PyThread_tss_create PyLong_AsUnsignedLong PyFrame_GetBack PyUnicode_AsEncodedString PyErr_Occurred PyDict_Copy PyErr_Fetch PyThreadState_New _PyThreadState_UncheckedGet _PyType_Lookup PyObject_CallFunctionObjArgs _ZN5torch3jit8toIValueEN8pybind116handleERKN3c104Type24SingletonOrSharedTypePtrIS4_EENS3_8optionalIiEE PyDict_Size _ZN8pybind116detail11type_casterIN2at6TensorEvE4loadENS_6handleEb PyIter_Next PySequence_Tuple PyThreadState_Clear PyDict_DelItemString PyUnicode_AsUTF8AndSize PyGILState_Ensure PyObject_Malloc PyCMethod_New PyCapsule_GetName PyTuple_GetItem PyFrame_GetCode PyException_SetCause PyUnicode_AsUTF8String PyBytes_AsStringAndSize + /usr/lib/rpm/anolis/brp-mangle-shebangs + /usr/lib/rpm/anolis/remove-info-dir + 
/usr/lib/rpm/anolis/check-desktop-files + /usr/lib/rpm/anolis/brp-python-bytecompile '' 1 0 Bytecompiling .py files below /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib64/python3.10 using python3.10 /usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/frontend.py:732: SyntaxWarning: assertion is always true, perhaps remove parentheses? /usr/lib64/python3.10/site-packages/intel_extension_for_pytorch/frontend.py:732: SyntaxWarning: assertion is always true, perhaps remove parentheses? Bytecompiling .py files below /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/lib/debug/usr/lib64/python3.10 using python3.10 + /usr/lib/rpm/anolis/brp-python-hardlink Processing files: intel-extension-for-pytorch-2.0.100-1.an23.x86_64 Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.I8JtVy + umask 022 + cd /builddir/build/BUILD + cd intel-extension-for-pytorch-2.0.100+cpu + DOCDIR=/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/doc/intel-extension-for-pytorch + export LC_ALL=C + LC_ALL=C + export DOCDIR + /usr/bin/mkdir -p /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/doc/intel-extension-for-pytorch + cp -pr README.md /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/doc/intel-extension-for-pytorch + RPM_EC=0 ++ jobs -p + exit 0 Executing(%license): /bin/sh -e /var/tmp/rpm-tmp.Fi2BVR + umask 022 + cd /builddir/build/BUILD + cd intel-extension-for-pytorch-2.0.100+cpu + LICENSEDIR=/builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/licenses/intel-extension-for-pytorch + export LC_ALL=C + LC_ALL=C + export LICENSEDIR + /usr/bin/mkdir -p /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/licenses/intel-extension-for-pytorch + cp -pr LICENSE /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64/usr/share/licenses/intel-extension-for-pytorch + RPM_EC=0 ++ jobs -p + exit 0 Provides: intel-extension-for-pytorch = 2.0.100-1.an23 intel-extension-for-pytorch(x86-64) = 2.0.100-1.an23 libintel-ext-pt-cpu.so()(64bit) libintel-ext-pt-python.so()(64bit) python3.10dist(intel-extension-for-pytorch) = 2.0.100 python3dist(intel-extension-for-pytorch) = 2.0.100 Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PartialHardlinkSets) <= 4.0.4-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.17)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.32)(64bit) libc.so.6(GLIBC_2.33)(64bit) libc.so.6(GLIBC_2.34)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.7)(64bit) libc10.so()(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgomp.so.1()(64bit) libgomp.so.1(GOMP_1.0)(64bit) libgomp.so.1(GOMP_4.0)(64bit) libgomp.so.1(OMP_1.0)(64bit) libintel-ext-pt-cpu.so()(64bit) libintel-ext-pt-python.so()(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libm.so.6(GLIBC_2.27)(64bit) libm.so.6(GLIBC_2.29)(64bit) libpython3.10.so.1.0()(64bit) libstdc++.so.6()(64bit) libstdc++.so.6(CXXABI_1.3)(64bit) libstdc++.so.6(CXXABI_1.3.11)(64bit) libstdc++.so.6(CXXABI_1.3.13)(64bit) libstdc++.so.6(CXXABI_1.3.2)(64bit) libstdc++.so.6(CXXABI_1.3.3)(64bit) libstdc++.so.6(CXXABI_1.3.5)(64bit) libstdc++.so.6(CXXABI_1.3.7)(64bit) 
libstdc++.so.6(CXXABI_1.3.8)(64bit) libstdc++.so.6(CXXABI_1.3.9)(64bit) libstdc++.so.6(GLIBCXX_3.4)(64bit) libstdc++.so.6(GLIBCXX_3.4.10)(64bit) libstdc++.so.6(GLIBCXX_3.4.11)(64bit) libstdc++.so.6(GLIBCXX_3.4.14)(64bit) libstdc++.so.6(GLIBCXX_3.4.15)(64bit) libstdc++.so.6(GLIBCXX_3.4.18)(64bit) libstdc++.so.6(GLIBCXX_3.4.19)(64bit) libstdc++.so.6(GLIBCXX_3.4.20)(64bit) libstdc++.so.6(GLIBCXX_3.4.21)(64bit) libstdc++.so.6(GLIBCXX_3.4.22)(64bit) libstdc++.so.6(GLIBCXX_3.4.26)(64bit) libstdc++.so.6(GLIBCXX_3.4.29)(64bit) libstdc++.so.6(GLIBCXX_3.4.30)(64bit) libstdc++.so.6(GLIBCXX_3.4.9)(64bit) libtorch_cpu.so()(64bit) libtorch_python.so()(64bit) python(abi) = 3.10 python3.10dist(numpy) python3.10dist(psutil) rtld(GNU_HASH) Processing files: intel-extension-for-pytorch-debugsource-2.0.100-1.an23.x86_64 Provides: intel-extension-for-pytorch-debugsource = 2.0.100-1.an23 intel-extension-for-pytorch-debugsource(x86-64) = 2.0.100-1.an23 Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 Processing files: intel-extension-for-pytorch-debuginfo-2.0.100-1.an23.x86_64 Provides: debuginfo(build-id) = 2cee0939cd0e94fe6ce36c2fb2b4bbe379753085 debuginfo(build-id) = 6a143e4ba7f5151f55413683f6da9e3e3edcac91 debuginfo(build-id) = a9220f4457bc64f2cb34df24ef63ed5ac6a0a858 debuginfo(build-id) = e6251168af34dd7d288c9ba34872d763a4545789 debuginfo(build-id) = f93cc001631b1849c4ce9003441d29bad7defc72 intel-extension-for-pytorch-debuginfo = 2.0.100-1.an23 intel-extension-for-pytorch-debuginfo(x86-64) = 2.0.100-1.an23 libintel-ext-pt-cpu.so-2.0.100-1.an23.x86_64.debug()(64bit) libintel-ext-pt-python.so-2.0.100-1.an23.x86_64.debug()(64bit) Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 Recommends: intel-extension-for-pytorch-debugsource(x86-64) = 2.0.100-1.an23 Checking for unpackaged file(s): /usr/lib/rpm/check-files /builddir/build/BUILDROOT/intel-extension-for-pytorch-2.0.100-1.an23.x86_64 Wrote: /builddir/build/RPMS/intel-extension-for-pytorch-debugsource-2.0.100-1.an23.x86_64.rpm Wrote: /builddir/build/RPMS/intel-extension-for-pytorch-2.0.100-1.an23.x86_64.rpm Wrote: /builddir/build/RPMS/intel-extension-for-pytorch-debuginfo-2.0.100-1.an23.x86_64.rpm Child return code was: 0
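Note on the SyntaxWarning reported above for intel_extension_for_pytorch/frontend.py:732: wrapping the assert arguments in parentheses turns them into a (condition, message) tuple, and a non-empty tuple is always truthy, so that check can never fail. Below is a minimal sketch of the pattern and the conventional fix; string stand-ins replace the torch dtypes so it runs without PyTorch, and the corrected form is illustrative rather than the exact change upstream may have applied.

    # Pattern flagged in the log: the parenthesized arguments form a tuple,
    # the tuple is always truthy, so the assertion always passes and CPython
    # emits "SyntaxWarning: assertion is always true, perhaps remove parentheses?"
    dtype = "int8"  # stand-in for an unsupported torch dtype

    assert (dtype == "float32" or dtype == "bfloat16",
            "TPP only supports torch.float and torch.bfloat16.")

    # Conventional fix: make the message the second operand of the assert
    # statement instead of part of a tuple, so the condition is actually checked.
    try:
        assert dtype in ("float32", "bfloat16"), \
            "TPP only supports torch.float and torch.bfloat16."
    except AssertionError as exc:
        print("assertion fired as intended:", exc)

Because assert statements are compiled away under python -O, code that must reject bad input unconditionally would raise ValueError instead; the sketch above only demonstrates why the warning appears during byte-compilation.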